source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
schedule-modifiers-1.c | /* { dg-do compile } */
/* { dg-options "-fopenmp" } */
/* Valid uses of OpenMP schedule() modifiers (simd, monotonic,
   nonmonotonic) on worksharing loops; every directive in this
   function is expected to be accepted without diagnostics.  */
void
foo (void)
{
int i;
/* A modifier may be repeated, and modifiers may be combined with
   a chunk size.  */
#pragma omp for simd schedule (simd, simd: static, 5)
for (i = 0; i < 64; i++)
;
#pragma omp for simd schedule (monotonic, simd: static)
for (i = 0; i < 64; i++)
;
#pragma omp for simd schedule (simd , monotonic : static, 6)
for (i = 0; i < 64; i++)
;
#pragma omp for schedule (monotonic, monotonic : static, 7)
for (i = 0; i < 64; i++)
;
#pragma omp for schedule (nonmonotonic, nonmonotonic : dynamic)
for (i = 0; i < 64; i++)
;
#pragma omp for simd schedule (nonmonotonic , simd : dynamic, 3)
for (i = 0; i < 64; i++)
;
#pragma omp for simd schedule (nonmonotonic,simd:guided,4)
for (i = 0; i < 64; i++)
;
/* monotonic is valid with every schedule kind.  */
#pragma omp for schedule (monotonic: static, 2)
for (i = 0; i < 64; i++)
;
#pragma omp for schedule (monotonic : static)
for (i = 0; i < 64; i++)
;
#pragma omp for schedule (monotonic : dynamic)
for (i = 0; i < 64; i++)
;
#pragma omp for schedule (monotonic : dynamic, 3)
for (i = 0; i < 64; i++)
;
#pragma omp for schedule (monotonic : guided)
for (i = 0; i < 64; i++)
;
#pragma omp for schedule (monotonic : guided, 7)
for (i = 0; i < 64; i++)
;
#pragma omp for schedule (monotonic : runtime)
for (i = 0; i < 64; i++)
;
#pragma omp for schedule (monotonic : auto)
for (i = 0; i < 64; i++)
;
/* nonmonotonic with dynamic/guided (no ordered clause) is valid.  */
#pragma omp for schedule (nonmonotonic : dynamic)
for (i = 0; i < 64; i++)
;
#pragma omp for schedule (nonmonotonic : dynamic, 3)
for (i = 0; i < 64; i++)
;
#pragma omp for schedule (nonmonotonic : guided)
for (i = 0; i < 64; i++)
;
#pragma omp for schedule (nonmonotonic : guided, 7)
for (i = 0; i < 64; i++)
;
}
void
bar (void)
{
int i;
#pragma omp for schedule (nonmonotonic: static, 2)
for (i = 0; i < 64; i++)
;
#pragma omp for schedule (nonmonotonic : static)
for (i = 0; i < 64; i++)
;
#pragma omp for schedule (nonmonotonic : runtime)
for (i = 0; i < 64; i++)
;
#pragma omp for schedule (nonmonotonic : auto)
for (i = 0; i < 64; i++)
;
#pragma omp for schedule (nonmonotonic : static) ordered /* { dg-error ".nonmonotonic. schedule modifier specified together with .ordered. clause" } */
for (i = 0; i < 64; i++)
#pragma omp ordered
;
#pragma omp for ordered schedule (nonmonotonic: static, 4) /* { dg-error ".nonmonotonic. schedule modifier specified together with .ordered. clause" } */
for (i = 0; i < 64; i++)
#pragma omp ordered
;
#pragma omp for schedule (nonmonotonic : dynamic) ordered /* { dg-error ".nonmonotonic. schedule modifier specified together with .ordered. clause" } */
for (i = 0; i < 64; i++)
#pragma omp ordered
;
#pragma omp for ordered schedule(nonmonotonic : dynamic, 5) /* { dg-error ".nonmonotonic. schedule modifier specified together with .ordered. clause" } */
for (i = 0; i < 64; i++)
#pragma omp ordered
;
#pragma omp for schedule (nonmonotonic : guided) ordered(1) /* { dg-error ".nonmonotonic. schedule modifier specified together with .ordered. clause" } */
for (i = 0; i < 64; i++)
{
#pragma omp ordered depend(sink: i - 1)
#pragma omp ordered depend(source)
}
#pragma omp for ordered(1) schedule(nonmonotonic : guided, 2) /* { dg-error ".nonmonotonic. schedule modifier specified together with .ordered. clause" } */
for (i = 0; i < 64; i++)
{
#pragma omp ordered depend(source)
#pragma omp ordered depend(sink: i - 1)
}
#pragma omp for schedule(nonmonotonic : runtime) ordered(1) /* { dg-error ".nonmonotonic. schedule modifier specified together with .ordered. clause" } */
for (i = 0; i < 64; i++)
{
#pragma omp ordered depend(source)
#pragma omp ordered depend(sink: i - 1)
}
#pragma omp for schedule (nonmonotonic , monotonic : dynamic) /* { dg-error "both .monotonic. and .nonmonotonic. modifiers specified" } */
for (i = 0; i < 64; i++)
;
#pragma omp for schedule (monotonic,nonmonotonic:dynamic) /* { dg-error "both .monotonic. and .nonmonotonic. modifiers specified" } */
for (i = 0; i < 64; i++)
;
}
|
sum.c | //sum.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#define N_RUNS 1000
#define N 120000
// read timer in second
// Return the current wall-clock time in seconds (millisecond resolution).
double read_timer() {
    struct timeb now;
    ftime(&now);
    double whole = (double) now.time;
    double fraction = (double) now.millitm / 1000.0;
    return whole + fraction;
}
//Create a matrix and a vector and fill with random numbers
// Fill the N-element vector X with pseudo-random floats in [0, 10).
void init(float *X) {
    int i = 0;
    while (i < N) {
        X[i] = (float)rand()/(float)(RAND_MAX/10.0);
        ++i;
    }
}
//Our sum function- what it does is pretty straight-forward.
// Sum the N elements of X using explicit SIMD vectorization.
// Returns the (single-precision) total.
float sum(float *X) {
    float result = 0;
    // The reduction clause is required: without it the scalar accumulation
    // into `result` is a loop-carried dependence, which makes the simd
    // construct nonconforming (the compiler may vectorize it incorrectly
    // or not at all).
    #pragma omp simd reduction(+:result)
    for (int i = 0; i<N; i++) {
        result += X[i];
    }
    return result;
}
// Debug functions
// Scalar reference implementation of sum(), used for the correctness check.
float sum_serial(float *X) {
    float total = 0;
    int idx;
    for (idx = 0; idx < N; ++idx)
        total += X[idx];
    return total;
}
// Print the first 8 elements of the vector, bracketed, for a quick
// visual sanity check.
void print_vector(float *vector) {
    int k;
    printf("[");
    for (k = 0; k < 8; ++k)
        printf("%.2f ", vector[k]);
    puts("]");
}
// Benchmark driver: times N_RUNS invocations of the SIMD sum against the
// serial reference, prints both results, timings and GFLOP rates, and a
// correctness delta.  Returns 0 on success, 1 on allocation failure.
int main(int argc, char **argv) {
    //Set everything up
    float *X = malloc(sizeof(float)*N);
    if (!X) {
        fprintf(stderr, "allocation of %zu bytes failed\n", sizeof(float)*N);
        return 1;
    }
    float result, result_serial;
    srand(time(NULL));
    init(X);
    // Time the SIMD version.
    double start = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        result = sum(X);
    double t = (read_timer() - start);
    // Time the serial reference.
    double start_serial = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        result_serial = sum_serial(X);
    double t_serial = (read_timer() - start_serial);
    print_vector(X);
    puts("=\n");
    printf("SIMD: %f\n", result);
    puts("---------------------------------");
    printf("Serial: %f\n", result_serial);
    // A sum over N elements performs ~N additions per run, so the total
    // FLOP count is N*N_RUNS.  (The previous formula, (2.0*N)*N*N_RUNS,
    // was a matrix-multiply FLOP count and overstated the rate by ~2*N.)
    double gflops = ((double) N * N_RUNS) / (1.0e9 * t);
    double gflops_serial = ((double) N * N_RUNS) / (1.0e9 * t_serial);
    printf("==================================================================\n");
    printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------\n");
    printf("Sum (SIMD):\t\t%4f\t%4f\n", t, gflops);
    printf("Sum (Serial):\t\t%4f\t%4f\n", t_serial, gflops_serial);
    printf("Correctness check: %f\n", result_serial - result);
    free(X);
    return 0;
}
|
detector.c | #include "darknet.h"
// Maps the detector's contiguous class index (0..79) to the official COCO
// category id; COCO's numbering has gaps (e.g. 12, 26, 29, 30 are unused).
static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90};
/* Train a detection network, optionally replicated across several GPUs.
   datacfg    - .data file naming the training image list and backup dir
   cfgfile    - network .cfg description
   weightfile - initial weights (may be NULL to start from scratch)
   gpus/ngpus - GPU ids, one network replica per GPU
   clear      - if nonzero, reset the "images seen" counter on load
   Checkpoints are written to the backup directory periodically and once
   more at the end. */
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
list *options = read_data_cfg(datacfg);
char *train_images = option_find_str(options, "train", "data/train.list");
char *backup_directory = option_find_str(options, "backup", "/backup/");
srand(time(0));
char *base = basecfg(cfgfile);
printf("%s\n", base);
float avg_loss = -1;
// NOTE(review): element size is sizeof(network) but the array holds
// network* pointers - this over-allocates; confirm sizeof(network *)
// was intended.
network **nets = calloc(ngpus, sizeof(network));
srand(time(0));
int seed = rand();
int i;
for(i = 0; i < ngpus; ++i){
// Same seed for every replica so all networks initialize identically.
srand(seed);
#ifdef GPU
cuda_set_device(gpus[i]);
#endif
nets[i] = load_network(cfgfile, weightfile, clear);
// Scale the learning rate with the number of GPUs (larger effective batch).
nets[i]->learning_rate *= ngpus;
}
srand(time(0));
network *net = nets[0];
int imgs = net->batch * net->subdivisions * ngpus;
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
data train, buffer;
layer l = net->layers[net->n - 1];
int classes = l.classes;
float jitter = l.jitter;
list *plist = get_paths(train_images);
//int N = plist->size;
char **paths = (char **)list_to_array(plist);
load_args args = get_base_args(net);
args.coords = l.coords;
args.paths = paths;
args.n = imgs;
args.m = plist->size;
args.classes = classes;
args.jitter = jitter;
args.num_boxes = l.max_boxes;
args.d = &buffer;
args.type = DETECTION_DATA;
//args.type = INSTANCE_DATA;
args.threads = 64;
// Start asynchronous loading of the first batch.
pthread_t load_thread = load_data(args);
double time;
int count = 0;
//while(i*imgs < N*120){
while(get_current_batch(net) < net->max_batches){
// Multi-scale training: pick a new input resolution every 10 batches
// when the last layer has random resizing enabled.
if(l.random && count++%10 == 0){
printf("Resizing\n");
int dim = (rand() % 10 + 10) * 32;
// Pin the resolution for the final 200 batches.
if (get_current_batch(net)+200 > net->max_batches) dim = 608;
//int dim = (rand() % 4 + 16) * 32;
printf("%d\n", dim);
args.w = dim;
args.h = dim;
// The in-flight load used the old size: drain it, discard the data,
// and restart loading at the new resolution.
pthread_join(load_thread, 0);
train = buffer;
free_data(train);
load_thread = load_data(args);
#pragma omp parallel for
for(i = 0; i < ngpus; ++i){
resize_network(nets[i], dim, dim);
}
net = nets[0];
}
time=what_time_is_it_now();
pthread_join(load_thread, 0);
train = buffer;
// Overlap: load the next batch while this one trains.
load_thread = load_data(args);
/*
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[10] + 1 + k*5);
if(!b.x) break;
printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
}
*/
/*
int zz;
for(zz = 0; zz < train.X.cols; ++zz){
image im = float_to_image(net->w, net->h, 3, train.X.vals[zz]);
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[zz] + k*5, 1);
printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
draw_bbox(im, b, 1, 1,0,0);
}
show_image(im, "truth11");
cvWaitKey(0);
save_image(im, "truth11");
}
*/
printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);
time=what_time_is_it_now();
float loss = 0;
#ifdef GPU
if(ngpus == 1){
loss = train_network(net, train);
} else {
loss = train_networks(nets, ngpus, train, 4);
}
#else
loss = train_network(net, train);
#endif
if (avg_loss < 0) avg_loss = loss;
// Exponential moving average of the loss for smoother progress output.
avg_loss = avg_loss*.9 + loss*.1;
i = get_current_batch(net);
printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs);
// Rolling .backup checkpoint every 100 batches.
if(i%100==0){
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s.backup", backup_directory, base);
save_weights(net, buff);
}
// Numbered snapshots every 10000 batches (every 100 during the first 1000).
if(i%10000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
save_weights(net, buff);
}
free_data(train);
}
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s_final.weights", backup_directory, base);
save_weights(net, buff);
}
/* Extract the numeric COCO image id from a path such as
   ".../COCO_val2014_000000397133.jpg": the id is the digit run that
   follows the last '_' (or, failing that, the last '/').  Falls back to
   parsing the whole string when neither separator is present, instead of
   dereferencing p+1 on a NULL pointer (undefined behavior in the
   original). */
static int get_coco_image_id(char *filename)
{
    char *p = strrchr(filename, '/');
    char *c = strrchr(filename, '_');
    if (c) p = c;
    if (!p) return atoi(filename);
    return atoi(p + 1);
}
/* Emit one COCO-format JSON result line per (box, class) pair with a
   nonzero score.  Boxes are converted from center/size to top-left/size
   form and clamped to the image rectangle before printing. */
static void print_cocos(FILE *fp, char *image_path, detection *dets, int num_boxes, int classes, int w, int h)
{
    int image_id = get_coco_image_id(image_path);
    int d, cls;
    for (d = 0; d < num_boxes; ++d) {
        float xmin = dets[d].bbox.x - dets[d].bbox.w/2.;
        float xmax = dets[d].bbox.x + dets[d].bbox.w/2.;
        float ymin = dets[d].bbox.y - dets[d].bbox.h/2.;
        float ymax = dets[d].bbox.y + dets[d].bbox.h/2.;
        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;
        float bx = xmin;
        float by = ymin;
        float bw = xmax - xmin;
        float bh = ymax - ymin;
        for (cls = 0; cls < classes; ++cls) {
            if (!dets[d].prob[cls]) continue;
            fprintf(fp, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f},\n",
                    image_id, coco_ids[cls], bx, by, bw, bh, dets[d].prob[cls]);
        }
    }
}
/* Write VOC "comp4"-style detection lines, one output file per class:
   "<image id> <score> <xmin> <ymin> <xmax> <ymax>" with 1-based
   coordinates clamped to the image. */
void print_detector_detections(FILE **fps, char *id, detection *dets, int total, int classes, int w, int h)
{
    int d, cls;
    for (d = 0; d < total; ++d) {
        float xmin = dets[d].bbox.x - dets[d].bbox.w/2. + 1;
        float xmax = dets[d].bbox.x + dets[d].bbox.w/2. + 1;
        float ymin = dets[d].bbox.y - dets[d].bbox.h/2. + 1;
        float ymax = dets[d].bbox.y + dets[d].bbox.h/2. + 1;
        if (xmin < 1) xmin = 1;
        if (ymin < 1) ymin = 1;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;
        for (cls = 0; cls < classes; ++cls) {
            if (dets[d].prob[cls]) {
                fprintf(fps[cls], "%s %f %f %f %f %f\n", id, dets[d].prob[cls],
                        xmin, ymin, xmax, ymax);
            }
        }
    }
}
/* Write ImageNet-style detection lines to a single file:
   "<image id> <1-based class> <score> <xmin> <ymin> <xmax> <ymax>"
   with 0-based box coordinates clamped to the image. */
void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int classes, int w, int h)
{
    int d, cls;
    for (d = 0; d < total; ++d) {
        float xmin = dets[d].bbox.x - dets[d].bbox.w/2.;
        float xmax = dets[d].bbox.x + dets[d].bbox.w/2.;
        float ymin = dets[d].bbox.y - dets[d].bbox.h/2.;
        float ymax = dets[d].bbox.y + dets[d].bbox.h/2.;
        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;
        for (cls = 0; cls < classes; ++cls) {
            if (dets[d].prob[cls]) {
                fprintf(fp, "%d %d %f %f %f %f %f\n", id, cls+1, dets[d].prob[cls],
                        xmin, ymin, xmax, ymax);
            }
        }
    }
}
/* Validate a detector over the "valid" image list, averaging predictions
   over each image and its horizontal flip (batch of 2).  Output format
   (COCO JSON, ImageNet, or VOC per-class text files) is chosen by the
   "eval" key in datacfg.  Image loading is pipelined across nthreads
   background threads. */
void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
int j;
list *options = read_data_cfg(datacfg);
char *valid_images = option_find_str(options, "valid", "data/train.list");
char *name_list = option_find_str(options, "names", "data/names.list");
char *prefix = option_find_str(options, "results", "results");
char **names = get_labels(name_list);
char *mapf = option_find_str(options, "map", 0);
int *map = 0;
if (mapf) map = read_map(mapf);
network *net = load_network(cfgfile, weightfile, 0);
// Batch of 2: the original image plus its mirrored copy.
set_batch_network(net, 2);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
list *plist = get_paths(valid_images);
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int classes = l.classes;
char buff[1024];
char *type = option_find_str(options, "eval", "voc");
FILE *fp = 0;
FILE **fps = 0;
int coco = 0;
int imagenet = 0;
if(0==strcmp(type, "coco")){
if(!outfile) outfile = "coco_results";
snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
fp = fopen(buff, "w");
fprintf(fp, "[\n");
coco = 1;
} else if(0==strcmp(type, "imagenet")){
if(!outfile) outfile = "imagenet-detection";
snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
fp = fopen(buff, "w");
imagenet = 1;
classes = 200;
} else {
// VOC style: one output file per class.
if(!outfile) outfile = "comp4_det_test_";
fps = calloc(classes, sizeof(FILE *));
for(j = 0; j < classes; ++j){
snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
fps[j] = fopen(buff, "w");
}
}
int m = plist->size;
int i=0;
int t;
float thresh = .005;
float nms = .45;
// Pipeline depth: nthreads images load in the background while the
// previous nthreads are evaluated.
int nthreads = 4;
image *val = calloc(nthreads, sizeof(image));
image *val_resized = calloc(nthreads, sizeof(image));
image *buf = calloc(nthreads, sizeof(image));
image *buf_resized = calloc(nthreads, sizeof(image));
pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
// Double-depth input: channels for the image followed by its flipped copy.
image input = make_image(net->w, net->h, net->c*2);
load_args args = {0};
args.w = net->w;
args.h = net->h;
//args.type = IMAGE_DATA;
args.type = LETTERBOX_DATA;
for(t = 0; t < nthreads; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
double start = what_time_is_it_now();
for(i = nthreads; i < m+nthreads; i += nthreads){
fprintf(stderr, "%d\n", i);
// Collect the images loaded in the previous round.
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
pthread_join(thr[t], 0);
val[t] = buf[t];
val_resized[t] = buf_resized[t];
}
// Kick off loading of the next round.
for(t = 0; t < nthreads && i+t < m; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
// Evaluate the collected images.
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
char *path = paths[i+t-nthreads];
char *id = basecfg(path);
// Pack original and mirrored pixels into the 2x-channel input.
copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data, 1);
flip_image(val_resized[t]);
copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data + net->w*net->h*net->c, 1);
network_predict(net, input.data);
int w = val[t].w;
int h = val[t].h;
int num = 0;
detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &num);
if (nms) do_nms_sort(dets, num, classes, nms);
if (coco){
print_cocos(fp, path, dets, num, classes, w, h);
} else if (imagenet){
print_imagenet_detections(fp, i+t-nthreads+1, dets, num, classes, w, h);
} else {
print_detector_detections(fps, id, dets, num, classes, w, h);
}
free_detections(dets, num);
free(id);
free_image(val[t]);
free_image(val_resized[t]);
}
}
for(j = 0; j < classes; ++j){
if(fps) fclose(fps[j]);
}
if(coco){
// Back up over the trailing ",\n" before closing the JSON array.
fseek(fp, -2, SEEK_CUR);
fprintf(fp, "\n]\n");
fclose(fp);
}
fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
/* Validate a detector over the "valid" image list with a single forward
   pass per image.  Output format (COCO JSON, ImageNet, or VOC per-class
   text files) is chosen by the "eval" key in datacfg.  Image loading is
   pipelined across nthreads background threads. */
void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
int j;
list *options = read_data_cfg(datacfg);
char *valid_images = option_find_str(options, "valid", "data/train.list");
char *name_list = option_find_str(options, "names", "data/names.list");
char *prefix = option_find_str(options, "results", "results");
char **names = get_labels(name_list);
char *mapf = option_find_str(options, "map", 0);
int *map = 0;
if (mapf) map = read_map(mapf);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
list *plist = get_paths(valid_images);
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int classes = l.classes;
char buff[1024];
char *type = option_find_str(options, "eval", "voc");
FILE *fp = 0;
FILE **fps = 0;
int coco = 0;
int imagenet = 0;
if(0==strcmp(type, "coco")){
if(!outfile) outfile = "coco_results";
snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
fp = fopen(buff, "w");
fprintf(fp, "[\n");
coco = 1;
} else if(0==strcmp(type, "imagenet")){
if(!outfile) outfile = "imagenet-detection";
snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
fp = fopen(buff, "w");
imagenet = 1;
classes = 200;
} else {
// VOC style: one output file per class.
if(!outfile) outfile = "comp4_det_test_";
fps = calloc(classes, sizeof(FILE *));
for(j = 0; j < classes; ++j){
snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
fps[j] = fopen(buff, "w");
}
}
int m = plist->size;
int i=0;
int t;
float thresh = .005;
float nms = .45;
// Pipeline depth: nthreads images load while the previous ones evaluate.
int nthreads = 4;
image *val = calloc(nthreads, sizeof(image));
image *val_resized = calloc(nthreads, sizeof(image));
image *buf = calloc(nthreads, sizeof(image));
image *buf_resized = calloc(nthreads, sizeof(image));
pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
load_args args = {0};
args.w = net->w;
args.h = net->h;
//args.type = IMAGE_DATA;
args.type = LETTERBOX_DATA;
for(t = 0; t < nthreads; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
double start = what_time_is_it_now();
for(i = nthreads; i < m+nthreads; i += nthreads){
fprintf(stderr, "%d\n", i);
// Collect the images loaded in the previous round.
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
pthread_join(thr[t], 0);
val[t] = buf[t];
val_resized[t] = buf_resized[t];
}
// Kick off loading of the next round.
for(t = 0; t < nthreads && i+t < m; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
// Evaluate the collected images.
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
char *path = paths[i+t-nthreads];
char *id = basecfg(path);
float *X = val_resized[t].data;
network_predict(net, X);
int w = val[t].w;
int h = val[t].h;
int nboxes = 0;
detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes);
if (nms) do_nms_sort(dets, nboxes, classes, nms);
if (coco){
print_cocos(fp, path, dets, nboxes, classes, w, h);
} else if (imagenet){
print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h);
} else {
print_detector_detections(fps, id, dets, nboxes, classes, w, h);
}
free_detections(dets, nboxes);
free(id);
free_image(val[t]);
free_image(val_resized[t]);
}
}
for(j = 0; j < classes; ++j){
if(fps) fclose(fps[j]);
}
if(coco){
// Back up over the trailing ",\n" before closing the JSON array.
fseek(fp, -2, SEEK_CUR);
fprintf(fp, "\n]\n");
fclose(fp);
}
fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
/* Measure proposal count, average IOU, and recall of a detector against
   the ground-truth boxes for the images in data/coco_val_5k.list.
   Prints a running summary line per image to stderr. */
void validate_detector_recall(char *cfgfile, char *weightfile)
{
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
list *plist = get_paths("data/coco_val_5k.list");
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int j, k;
int m = plist->size;
int i=0;
float thresh = .001;
float iou_thresh = .5;
float nms = .4;
int total = 0;
int correct = 0;
int proposals = 0;
float avg_iou = 0;
for(i = 0; i < m; ++i){
char *path = paths[i];
image orig = load_image_color(path, 0, 0);
image sized = resize_image(orig, net->w, net->h);
char *id = basecfg(path);
network_predict(net, sized.data);
int nboxes = 0;
detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5, 0, 1, &nboxes);
if (nms) do_nms_obj(dets, nboxes, 1, nms);
// Derive the label-file path from the image path by convention.
char labelpath[4096];
find_replace(path, "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int num_labels = 0;
box_label *truth = read_boxes(labelpath, &num_labels);
// Every detection above threshold counts as a region proposal.
for(k = 0; k < nboxes; ++k){
if(dets[k].objectness > thresh){
++proposals;
}
}
for (j = 0; j < num_labels; ++j) {
++total;
box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h};
float best_iou = 0;
// NOTE(review): this loop scans l.w*l.h*l.n entries of dets rather
// than nboxes; confirm get_network_boxes always returns at least
// that many detections here.
for(k = 0; k < l.w*l.h*l.n; ++k){
float iou = box_iou(dets[k].bbox, t);
if(dets[k].objectness > thresh && iou > best_iou){
best_iou = iou;
}
}
avg_iou += best_iou;
if(best_iou > iou_thresh){
++correct;
}
}
fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total);
free(id);
free_image(orig);
free_image(sized);
}
}
/* Run detection on a single image (filename given) or interactively on
   paths read from stdin.  Draws and saves the annotated image; displays
   it when built with OpenCV and no outfile is given.  thresh /
   hier_thresh control the detection and hierarchy thresholds. */
void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen)
{
list *options = read_data_cfg(datacfg);
char *name_list = option_find_str(options, "names", "data/names.list");
char **names = get_labels(name_list);
image **alphabet = load_alphabet();
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
double time;
//struct timeval start, stop;
char buff[256];
char *input = buff;
int j;
float nms=.45;
#ifdef NNPACK
nnp_initialize();
#ifdef QPU_GEMM
net->threadpool = pthreadpool_create(1);
#else
net->threadpool = pthreadpool_create(4);
#endif
#endif
while(1){
if(filename){
// NOTE(review): strncpy does not NUL-terminate when filename is
// >= 256 chars - confirm callers never pass such paths.
strncpy(input, filename, 256);
} else {
// Interactive mode: prompt for an image path on stdin.
printf("Enter Image Path: ");
fflush(stdout);
input = fgets(input, 256, stdin);
if(!input) return;
strtok(input, "\n");
}
#ifdef NNPACK
image im = load_image_thread(input, 0, 0, net->c, net->threadpool);
image sized = letterbox_image_thread(im, net->w, net->h, net->threadpool);
#else
image im = load_image_color(input,0,0);
image sized = letterbox_image(im, net->w, net->h);
//image sized = resize_image(im, net->w, net->h);
//image sized2 = resize_max(im, net->w);
//image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h);
//resize_network(net, sized.w, sized.h);
#endif
layer l = net->layers[net->n-1];
float *X = sized.data;
/*gettimeofday(&start, 0);
network_predict(net, X);
gettimeofday(&stop, 0);
printf("%s: Predicted in %ld ms.\n", input, (stop.tv_sec * 1000 + stop.tv_usec / 1000) - (start.tv_sec * 1000 + start.tv_usec / 1000));
get_region_boxes(l, im.w, im.h, net->w, net->h, thresh, probs, boxes, masks, 0, 0, hier_thresh, 1);*/
time=what_time_is_it_now();
network_predict(net, X);
printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-time);
int nboxes = 0;
detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
printf("%d\n", nboxes);
// Dump raw box geometry before non-max suppression.
for(int i=0;i<nboxes;i++){
printf("Box %d at (x,y)=(%f,%f) with (w,h)=(%f,%f)\n", i, dets[i].bbox.x, dets[i].bbox.y, dets[i].bbox.w, dets[i].bbox.h);
}
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes);
free_detections(dets, nboxes);
if(outfile){
save_image(im, outfile);
}
else{
save_image(im, "predictions");
#ifdef OPENCV
cvNamedWindow("predictions", CV_WINDOW_NORMAL);
if(fullscreen){
cvSetWindowProperty("predictions", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
}
show_image(im, "predictions");
cvWaitKey(0);
cvDestroyAllWindows();
#endif
}
free_image(im);
free_image(sized);
// Single-shot when a filename was supplied on the command line.
if (filename) break;
}
#ifdef NNPACK
pthreadpool_destroy(net->threadpool);
nnp_deinitialize();
#endif
free_network(net);
}
/*
void censor_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
float nms = .45;
while(1){
image in = get_image_from_stream(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
float *X = in_s.data;
network_predict(net, X);
int nboxes = 0;
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 0, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int left = b.x-b.w/2.;
int top = b.y-b.h/2.;
censor_image(in, left, top, b.w, b.h);
}
}
show_image(in, base);
cvWaitKey(10);
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream(cap);
free_image(in);
}
}
#endif
}
void extract_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
int count = 0;
float nms = .45;
while(1){
image in = get_image_from_stream(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
show_image(in, base);
int nboxes = 0;
float *X = in_s.data;
network_predict(net, X);
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 1, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int size = b.w*in.w > b.h*in.h ? b.w*in.w : b.h*in.h;
int dx = b.x*in.w-size/2.;
int dy = b.y*in.h-size/2.;
image bim = crop_image(in, dx, dy, size, size);
char buff[2048];
sprintf(buff, "results/extract/%07d", count);
++count;
save_image(bim, buff);
free_image(bim);
}
}
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream(cap);
free_image(in);
}
}
#endif
}
*/
/*
void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets)
{
network_predict_image(net, im);
layer l = net->layers[net->n-1];
int nboxes = num_boxes(net);
fill_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 0, dets);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
}
*/
/* Command-line dispatcher for the "detector" subcommand: parses optional
   flags (-thresh, -gpus, -out, ...), then routes to train / test /
   valid / valid2 / recall / demo based on argv[2]. */
void run_detector(int argc, char **argv)
{
char *prefix = find_char_arg(argc, argv, "-prefix", 0);
float thresh = find_float_arg(argc, argv, "-thresh", .24);
float hier_thresh = find_float_arg(argc, argv, "-hier", .5);
int cam_index = find_int_arg(argc, argv, "-c", 0);
int frame_skip = find_int_arg(argc, argv, "-s", 0);
int avg = find_int_arg(argc, argv, "-avg", 3);
if(argc < 4){
fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
return;
}
char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
char *outfile = find_char_arg(argc, argv, "-out", 0);
int *gpus = 0;
int gpu = 0;
int ngpus = 0;
if(gpu_list){
printf("%s\n", gpu_list);
int len = strlen(gpu_list);
ngpus = 1;
int i;
// Count comma-separated GPU ids.
for(i = 0; i < len; ++i){
if (gpu_list[i] == ',') ++ngpus;
}
gpus = calloc(ngpus, sizeof(int));
for(i = 0; i < ngpus; ++i){
gpus[i] = atoi(gpu_list);
gpu_list = strchr(gpu_list, ',')+1;
}
} else {
// Default: single GPU, the globally selected gpu_index.
gpu = gpu_index;
gpus = &gpu;
ngpus = 1;
}
int clear = find_arg(argc, argv, "-clear");
int fullscreen = find_arg(argc, argv, "-fullscreen");
int width = find_int_arg(argc, argv, "-w", 0);
int height = find_int_arg(argc, argv, "-h", 0);
int fps = find_int_arg(argc, argv, "-fps", 0);
//int class = find_int_arg(argc, argv, "-class", 0);
// NOTE(review): argc < 4 above only guarantees argv[3]; when argc == 4,
// argv[4] is the terminating NULL pointer - confirm a cfg argument is
// always required before relying on it below.
char *datacfg = argv[3];
char *cfg = argv[4];
char *weights = (argc > 5) ? argv[5] : 0;
char *filename = (argc > 6) ? argv[6]: 0;
if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen);
else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear);
else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile);
else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile);
else if(0==strcmp(argv[2], "recall")) validate_detector_recall(cfg, weights);
else if(0==strcmp(argv[2], "demo")) {
list *options = read_data_cfg(datacfg);
int classes = option_find_int(options, "classes", 20);
char *name_list = option_find_str(options, "names", "data/names.list");
char **names = get_labels(name_list);
demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, avg, hier_thresh, width, height, fps, fullscreen);
}
//else if(0==strcmp(argv[2], "extract")) extract_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
//else if(0==strcmp(argv[2], "censor")) censor_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
}
|
GB_binop__band_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__band_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__band_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__band_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__band_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__band_uint8)
// A*D function (colscale): GB (_AxD__band_uint8)
// D*A function (rowscale): GB (_DxB__band_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__band_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__band_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__band_uint8)
// C=scalar+B GB (_bind1st__band_uint8)
// C=scalar+B' GB (_bind1st_tran__band_uint8)
// C=A+scalar GB (_bind2nd__band_uint8)
// C=A'+scalar GB (_bind2nd_tran__band_uint8)
// C type: uint8_t
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 0
// BinaryOp: cij = (aij) & (bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) & (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BAND || GxB_NO_UINT8 || GxB_NO_BAND_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no mask and no accumulator.
// The whole computation lives in the included template.
void GB (_Cdense_ewise3_noaccum__band_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads   // # of OpenMP threads to use
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C with the BAND operator.
GrB_Info GB (_Cdense_accumB__band_uint8)
(
GrB_Matrix C,                    // dense input/output matrix
const GrB_Matrix B,              // sparse matrix folded into C
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b into every entry of dense C.
GrB_Info GB (_Cdense_accumb__band_uint8)
(
GrB_Matrix C,                    // dense input/output matrix
const GB_void *p_bwork,          // pointer to the scalar b (type uint8_t)
const int nthreads               // # of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable (the block above always returns); kept by the code generator
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__band_uint8)
(
GrB_Matrix C,                    // output matrix
const GrB_Matrix A,
const GrB_Matrix D,              // diagonal scaling matrix
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is written by the template; values are uint8_t for this operator
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__band_uint8)
(
GrB_Matrix C,                    // output matrix
const GrB_Matrix D,              // diagonal scaling matrix
const GrB_Matrix B,
int nthreads                     // # of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd/eWiseUnion: C = A+B (optionally masked) using the BAND operator.
GrB_Info GB (_AaddB__band_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,              // optional mask (may be NULL)
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,        // true: eWiseUnion, false: eWiseAdd
const GB_void *alpha_scalar_in,  // dereferenced only when is_eWiseUnion
const GB_void *beta_scalar_in,   // dereferenced only when is_eWiseUnion
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces freed by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B (optionally masked), C sparse/hypersparse.
GrB_Info GB (_AemultB_08__band_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,              // optional mask (may be NULL)
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B, A sparse/hyper, B bitmap/full.
// GB_BINOP_FLIP is 0 for BAND (commutative), so the #else branch is compiled.
GrB_Info GB (_AemultB_02__band_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,               // ignored when GB_BINOP_FLIP is 0
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B, M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_04__band_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,              // required mask for this method
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where C is held as a bitmap.
GrB_Info GB (_AemultB_bitmap__band_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,              // optional mask (may be NULL)
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x & Bx [p] for every entry present in B (per the bitmap Bb).
GrB_Info GB (_bind1st__band_uint8)
(
GB_void *Cx_output,              // Cx and Bx may be aliased
const GB_void *x_input,          // scalar x, of type uint8_t
const GB_void *Bx_input,         // values of B, of type uint8_t
const int8_t *restrict Bb,       // bitmap of B, consulted via GBB
int64_t bnz,                     // # of entries to process
int nthreads                     // # of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present according to the bitmap
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x) & (bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] & y for every entry present in A (per the bitmap Ab).
GrB_Info GB (_bind2nd__band_uint8)
(
GB_void *Cx_output,              // Cx and Ax may be aliased
const GB_void *Ax_input,         // values of A, of type uint8_t
const GB_void *y_input,          // scalar y, of type uint8_t
const int8_t *restrict Ab,       // bitmap of A, consulted via GBB
int64_t anz,                     // # of entries to process
int nthreads                     // # of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present according to the bitmap
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij) & (y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) & (aij) ; \
}
// C = op (x, A'): transpose A while applying cij = x & aij (GB_CAST_OP above).
GrB_Info GB (_bind1st_tran__band_uint8)
(
GrB_Matrix C,
const GB_void *x_input,          // scalar x, of type uint8_t
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code after this function (same type here,
// since A and B have identical types for this operator)
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij) & (y) ; \
}
// C = op (A', y): transpose A while applying cij = aij & y (GB_CAST_OP above).
GrB_Info GB (_bind2nd_tran__band_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,          // scalar y, of type uint8_t
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__lnot_fp64_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp64_int64
// op(A') function: GB_tran__lnot_fp64_int64
// C type: double
// A type: int64_t
// cast: double cij = (double) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP64 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (double) (!(Ax [p] != 0)) for p = 0..anz-1 (logical NOT, cast
// from int64_t to double), parallelized over nthreads.
GrB_Info GB_unop__lnot_fp64_int64
(
double *restrict Cx,             // output array, size anz
const int64_t *restrict Ax,      // input array, size anz
int64_t anz,                     // # of entries to process
int nthreads                     // # of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// expands to: aij = Ax [p] ; Cx [p] = (double) (!(aij != 0))
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (cast (A')): transpose A while applying the unary op and
// typecast; the work happens in the included transpose template.
GrB_Info GB_tran__lnot_fp64_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,    // workspace from the symbolic phase
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice                      // # of slices of A
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// numeric phase only (counts were computed in phase 1)
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d25pt_var.c | /*
* Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two struct timeval values, following the
 * classic glibc-manual idiom. Note that Y is normalized in place (it is
 * modified). Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y so that y->tv_usec <= x->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }

    /* Push any excess whole seconds in the usec gap back into y,
     * keeping tv_usec of the result within one second. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int excess = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * excess;
        y->tv_sec -= excess;
    }

    /* After normalization, tv_usec of the result is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* Negative iff the seconds field of x is behind that of (normalized) y. */
    return x->tv_sec < y->tv_sec;
}
/* Driver for the order-4 (25-point) 3D stencil with variable coefficients.
 * Usage: prog Nx Ny Nz Nt.  Fixes applied versus the original:
 *  - Nx/Ny/Nz/Nt were read only conditionally, leaving them uninitialized
 *    (undefined behavior) when arguments were missing; now required.
 *  - initialization loops started at index 1, but the stencil reads plane
 *    index 0 (i-4 with i==4), so index 0 was read uninitialized; start at 0.
 *  - `min(...)` used an undeclared lowercase identifier; the macro is MIN.
 *  - top-level A, coef, and tile_size allocations were leaked; now freed. */
int main(int argc, char *argv[])
{
    int t, i, j, k, m, test;
    int Nx, Ny, Nz, Nt;

    if (argc < 5) {
        fprintf(stderr, "usage: %s Nx Ny Nz Nt\n", argv[0]);
        return 1;
    }
    Nx = atoi(argv[1]) + 8;  /* +8: 4-deep halo on each side */
    Ny = atoi(argv[2]) + 8;
    Nz = atoi(argv[3]) + 8;
    Nt = atoi(argv[4]);

    /* allocate the two time planes of the solution array */
    double ****A = (double ****) malloc(sizeof(double ***) * 2);
    for (m = 0; m < 2; m++) {
        A[m] = (double ***) malloc(sizeof(double **) * Nz);
        for (i = 0; i < Nz; i++) {
            A[m][i] = (double **) malloc(sizeof(double *) * Ny);
            for (j = 0; j < Ny; j++) {
                A[m][i][j] = (double *) malloc(sizeof(double) * Nx);
            }
        }
    }

    /* allocate the 13 axis-symmetric coefficient arrays */
    double ****coef = (double ****) malloc(sizeof(double ***) * 13);
    for (m = 0; m < 13; m++) {
        coef[m] = (double ***) malloc(sizeof(double **) * Nz);
        for (i = 0; i < Nz; i++) {
            coef[m][i] = (double **) malloc(sizeof(double *) * Ny);
            for (j = 0; j < Ny; j++) {
                coef[m][i][j] = (double *) malloc(sizeof(double) * Nx);
            }
        }
    }

    /* tile size information, including extra element to decide the
     * list length (read by the source-to-source transformation tools) */
    int *tile_size = (int *) malloc(sizeof(int) * 5);
    tile_size[0] = 4;
    tile_size[1] = 4;
    tile_size[2] = 16;
    tile_size[3] = 32;
    tile_size[4] = -1;

    /* for timekeeping */
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;

    const int BASE = 1024;

    /* deterministic initialization of every cell the stencil may read */
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
            }
        }
    }
    for (m = 0; m < 13; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                for (k = 0; k < Nx; k++) {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for (test = 0; test < TESTS; test++) {
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz-4; i++) {
                for (j = 4; j < Ny-4; j++) {
                    for (k = 4; k < Nx-4; k++) {
                        A[(t+1)%2][i][j][k] =
                            coef[0][i][j][k] * A[(t)%2][i  ][j  ][k  ] +
                            coef[1][i][j][k] * (A[(t)%2][i-1][j  ][k  ] + A[(t)%2][i+1][j  ][k  ]) +
                            coef[2][i][j][k] * (A[(t)%2][i  ][j-1][k  ] + A[(t)%2][i  ][j+1][k  ]) +
                            coef[3][i][j][k] * (A[(t)%2][i  ][j  ][k-1] + A[(t)%2][i  ][j  ][k+1]) +
                            coef[4][i][j][k] * (A[(t)%2][i-2][j  ][k  ] + A[(t)%2][i+2][j  ][k  ]) +
                            coef[5][i][j][k] * (A[(t)%2][i  ][j-2][k  ] + A[(t)%2][i  ][j+2][k  ]) +
                            coef[6][i][j][k] * (A[(t)%2][i  ][j  ][k-2] + A[(t)%2][i  ][j  ][k+2]) +
                            coef[7][i][j][k] * (A[(t)%2][i-3][j  ][k  ] + A[(t)%2][i+3][j  ][k  ]) +
                            coef[8][i][j][k] * (A[(t)%2][i  ][j-3][k  ] + A[(t)%2][i  ][j+3][k  ]) +
                            coef[9][i][j][k] * (A[(t)%2][i  ][j  ][k-3] + A[(t)%2][i  ][j  ][k+3]) +
                            coef[10][i][j][k]* (A[(t)%2][i-4][j  ][k  ] + A[(t)%2][i+4][j  ][k  ]) +
                            coef[11][i][j][k]* (A[(t)%2][i  ][j-4][k  ] + A[(t)%2][i  ][j+4][k  ]) +
                            coef[12][i][j][k]* (A[(t)%2][i  ][j  ][k-4] + A[(t)%2][i  ][j  ][k+4]) ;
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        (void) ts_return;  /* end >= start here; difference never negative */
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);  /* was lowercase min(): undeclared */
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    /* Free all allocated arrays, including the previously-leaked
     * top-level pointers and the tile-size list. */
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);
    for (m = 0; m < 13; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    free(coef);
    free(tile_size);

    return 0;
}
|
private-1.c | extern void abort (void);
int a = 18;
// Exercise OpenMP data-sharing clauses with a single thread:
//   j, m : private      (uninitialized copies inside; outer values untouched)
//   i, l : firstprivate (copies start at the outer values; outer untouched)
//   k, n : shared       (writes inside the region persist)
//   a    : file-scope, shared by default
void
f1 (int i, int j, int k)
{
int l = 6, m = 7, n = 8;
#pragma omp parallel private(j, m) shared(k, n) firstprivate(i, l) \
num_threads(1)
{
j = 6;
m = 5;
// k counts failures inside the region; it must remain 0
if (++a != 19 || ++i != 9 || j != 6 || ++l != 7 || m != 5 || ++n != 9)
#pragma omp atomic
k++;
}
// after the region: i, j, l, m keep their pre-region values;
// shared a and n carry the increments made inside
if (a != 19 || i != 8 || j != 26 || k != 0 || l != 6 || m != 7 || n != 9)
abort ();
}
int v1 = 1, v2 = 2, v5 = 5;
int err;
// Exercise private/firstprivate on a sections construct combined with a
// nested firstprivate parallel region; v5 stays shared throughout.
void
f2 (void)
{
int v3 = 3;
#pragma omp sections private (v1) firstprivate (v2)
{
#pragma omp section
{
int v4 = 4;
// writes the sections-private copy of v1, not the global (still 1)
v1 = 7;
// the inner region receives firstprivate copies of the current values;
// v5 is shared, so its increment below persists
#pragma omp parallel num_threads(1) firstprivate(v1, v2, v3, v4)
{
if (++v1 != 8 || ++v2 != 3 || ++v3 != 4 || ++v4 != 5 || ++v5 != 6)
err = 1;
}
// the originals are unchanged by the inner firstprivate copies,
// while shared v5 now holds 6
if (v1 != 7 || v2 != 2 || v3 != 3 || v4 != 4 || v5 != 6)
abort ();
if (err)
abort ();
}
}
}
// Run both data-sharing tests; f1's arguments are the expected pre-region
// values of i and j plus the failure counter k (which must stay 0).
int
main (void)
{
f1 (8, 26, 0);
f2 ();
return 0;
}
|
GB_binop__ge_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ge_uint8)
// A.*B function (eWiseMult): GB (_AemultB_01__ge_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__ge_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__ge_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_uint8)
// A*D function (colscale): GB (_AxD__ge_uint8)
// D*A function (rowscale): GB (_DxB__ge_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__ge_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__ge_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_uint8)
// C=scalar+B GB (_bind1st__ge_uint8)
// C=scalar+B' GB (_bind1st_tran__ge_uint8)
// C=A+scalar GB (_bind2nd__ge_uint8)
// C=A'+scalar GB (_bind2nd_tran__ge_uint8)
// C type: bool
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_UINT8 || GxB_NO_GE_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense (GE operator, bool result).
GrB_Info GB (_Cdense_ewise3_noaccum__ge_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads   // # of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B (dense accumulate): the template is compiled out (#if 0) for this
// operator, so the function is intentionally a no-op that reports success.
GrB_Info GB (_Cdense_accumB__ge_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (dense scalar accumulate): the template is compiled out (#if 0)
// for this operator, so the function is a no-op that reports success.
GrB_Info GB (_Cdense_accumb__ge_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,          // pointer to the scalar b (type uint8_t)
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D (bool result).
GrB_Info GB (_AxD__ge_uint8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D (bool result).
GrB_Info GB (_DxB__ge_uint8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads                     // # of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (optionally masked) using the GE operator.
GrB_Info GB (_AaddB__ge_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,              // optional mask (may be NULL)
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces freed by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B (optionally masked).
GrB_Info GB (_AemultB_01__ge_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,              // optional mask (may be NULL)
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B, A sparse/hyper, B bitmap/full.
// GB_BINOP_FLIP is 0 for this operator, so the #else branch is compiled.
GrB_Info GB (_AemultB_02__ge_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,               // ignored when GB_BINOP_FLIP is 0
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B, M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_03__ge_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,              // required mask for this method
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where C is held as a bitmap.
GrB_Info GB (_AemultB_bitmap__ge_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,              // optional mask (may be NULL)
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x >= Bx [p]) for every entry present in B (per the bitmap Bb).
GrB_Info GB (_bind1st__ge_uint8)
(
GB_void *Cx_output,              // Cx and Bx may be aliased
const GB_void *x_input,          // scalar x, of type uint8_t
const GB_void *Bx_input,         // values of B, of type uint8_t
const int8_t *restrict Bb,       // bitmap of B, consulted via GBB
int64_t bnz,                     // # of entries to process
int nthreads                     // # of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present according to the bitmap
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] >= y) for every entry present in A (per the bitmap Ab).
GrB_Info GB (_bind2nd__ge_uint8)
(
GB_void *Cx_output,              // Cx and Ax may be aliased
const GB_void *Ax_input,         // values of A, of type uint8_t
const GB_void *y_input,          // scalar y, of type uint8_t
const int8_t *restrict Ab,       // bitmap of A, consulted via GBB
int64_t anz,                     // # of entries to process
int nthreads                     // # of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present according to the bitmap
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
// C = op (x, A'): transpose A while applying cij = (x >= aij) (GB_CAST_OP above).
GrB_Info GB (_bind1st_tran__ge_uint8)
(
GrB_Matrix C,
const GB_void *x_input,          // scalar x, of type uint8_t
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code after this function
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the included transpose kernel below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij >= y) ; \
}

// C = op (A', y): transpose A and apply z = (aij >= y) at each entry,
// with the scalar y bound as the second operand.
GrB_Info GB (_bind2nd_tran__ge_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,     // scalar y, bound 2nd
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    // the included kernel performs the transpose, invoking GB_CAST_OP per entry
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
PropagationMPlex.h | #ifndef _propagation_mplex_
#define _propagation_mplex_
#include "Matrix.h"
namespace mkfit {
// Fold the phi parameter (slot 4) back toward [-PI, PI) by at most one
// 2*PI period.  Note: N_proc is unused; the loop always covers the full
// matriplex width NN.
inline void squashPhiMPlex(MPlexLV& par, const int N_proc)
{
#pragma omp simd
  for (int i = 0; i < NN; ++i)
  {
    if (par(i, 4, 0) >= Config::PI)
      par(i, 4, 0) -= Config::TwoPI;
    if (par(i, 4, 0) < -Config::PI)
      par(i, 4, 0) += Config::TwoPI;
  }
}
// Wrap the phi parameter (slot 4) into [-PI, PI) for arbitrarily large
// inputs, removing a whole number of 2*PI periods via floor.
// Note: N_proc is unused; the loop always covers the full matriplex width NN.
inline void squashPhiMPlexGeneral(MPlexLV& par, const int N_proc)
{
#pragma omp simd
  for (int i = 0; i < NN; ++i)
  {
    const auto periods = std::floor(0.5f * Config::InvPI * (par(i, 4, 0) + Config::PI));
    par(i, 4, 0) -= periods * Config::TwoPI;
  }
}
// Straight-line propagation of track state (error + parameters) toward the
// measurement; results are written to outErr/outPar.
void propagateLineToRMPlex(const MPlexLS &psErr, const MPlexLV& psPar,
                           const MPlexHS &msErr, const MPlexHV& msPar,
                           MPlexLS &outErr, MPlexLV& outPar,
                           const int N_proc);

// Helix propagation to the radii in msRad; optional noMatEffPtr suppresses
// material effects per track (NOTE(review): inferred from the name — confirm
// in the .cc implementation).
void propagateHelixToRMPlex(const MPlexLS &inErr, const MPlexLV& inPar,
                            const MPlexQI &inChg, const MPlexQF& msRad,
                            MPlexLS &outErr, MPlexLV& outPar,
                            const int N_proc, const PropagationFlags pflags,
                            const MPlexQI *noMatEffPtr=nullptr);

// Helix-at-radius solver using the full Jacobian for the error propagation.
void helixAtRFromIterativeCCSFullJac(const MPlexLV& inPar, const MPlexQI& inChg, const MPlexQF &msRad,
                                     MPlexLV& outPar, MPlexLL& errorProp,
                                     const int N_proc);

// Iterative helix-at-radius solver in CCS coordinates; per-track failures
// are flagged in outFailFlag.
void helixAtRFromIterativeCCS(const MPlexLV& inPar, const MPlexQI& inChg, const MPlexQF &msRad,
                              MPlexLV& outPar, MPlexLL& errorProp,
                              MPlexQI& outFailFlag,
                              const int N_proc, const PropagationFlags pflags);

// Helix propagation to the z positions in msZ (endcap direction).
void propagateHelixToZMPlex(const MPlexLS &inErr, const MPlexLV& inPar,
                            const MPlexQI &inChg, const MPlexQF& msZ,
                            MPlexLS &outErr, MPlexLV& outPar,
                            const int N_proc, const PropagationFlags pflags,
                            const MPlexQI *noMatEffPtr=nullptr);

// Helix-at-z solver producing propagated parameters and the error-propagation
// Jacobian.
void helixAtZ(const MPlexLV& inPar, const MPlexQI& inChg, const MPlexQF &msZ,
              MPlexLV& outPar, MPlexLL& errorProp,
              const int N_proc, const PropagationFlags pflags);

// Apply multiple-scattering / energy-loss corrections (radiation length and
// Xi inputs) to the propagated state.
void applyMaterialEffects(const MPlexQF &hitsRl, const MPlexQF& hitsXi, const MPlexQF& propSign,
                          MPlexLS &outErr, MPlexLV& outPar,
                          const int N_proc, const bool isBarrel);
} // end namespace mkfit
#endif
|
GB_nvec_nonempty.c | //------------------------------------------------------------------------------
// GB_nvec_nonempty: count the number of non-empty vectors
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// All pending tuples are ignored. If a vector has all zombies it is still
// counted as non-empty.
#include "GB.h"
int64_t GB_nvec_nonempty        // return # of non-empty vectors
(
    const GrB_Matrix A,         // input matrix to examine
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (A != NULL) ;
    ASSERT (GB_ZOMBIES_OK (A)) ;

    //--------------------------------------------------------------------------
    // quick return: a matrix with no entries has no non-empty vectors
    //--------------------------------------------------------------------------

    if (GB_NNZ (A) == 0)
    {
        return (0) ;
    }

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    int64_t anvec = A->nvec ;
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (anvec, chunk, nthreads_max) ;

    //--------------------------------------------------------------------------
    // count the non-empty vectors: vector j is non-empty iff Ap [j] < Ap [j+1]
    //--------------------------------------------------------------------------

    const int64_t *restrict Ap = A->p ;
    int64_t nonempty = 0 ;

    #pragma omp parallel for num_threads(nthreads) schedule(static) \
        reduction(+:nonempty)
    for (int64_t j = 0 ; j < anvec ; j++)
    {
        if (Ap [j] < Ap [j+1])
        {
            nonempty++ ;
        }
    }

    ASSERT (nonempty >= 0 && nonempty <= A->vdim) ;

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    return (nonempty) ;
}
|
main.c | #include <omp.h>
#include <stdio.h>
/*
 * Runs before main() via the GCC constructor attribute; each thread of the
 * OpenMP team announces itself.  Fixed: the message "hello from thread %d"
 * previously printed omp_get_num_threads() (the team size, identical for
 * every thread) instead of the per-thread id omp_get_thread_num().
 */
__attribute__ ((constructor))
void a_constructor()
{
#pragma omp parallel
  printf("hello from thread %d\n", omp_get_thread_num());
}
/* Entry point: prints a greeting; command-line arguments are ignored. */
int main(int argc, char **argv)
{
  (void) argc;
  (void) argv;
  puts("hello from main");
  return 0;
}
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "coders/coders-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declarations.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
Enumerated declarations.
*/
/*
  Channel-data compression methods as stored in a PSD file.
*/
typedef enum
{
  Raw = 0,                    /* uncompressed scanlines */
  RLE = 1,                    /* PackBits run-length encoding */
  ZipWithoutPrediction = 2,   /* zlib deflate */
  ZipWithPrediction = 3       /* zlib deflate of delta-predicted samples */
} PSDCompressionType;
/*
  PSD color modes from the file header (values 5 and 6 are unassigned here).
*/
typedef enum
{
  BitmapMode = 0,
  GrayscaleMode = 1,
  IndexedMode = 2,
  RGBMode = 3,
  CMYKMode = 4,
  MultichannelMode = 7,
  DuotoneMode = 8,
  LabMode = 9
} PSDImageType;
/*
Typedef declarations.
*/
/*
  One entry of a layer's channel table.
*/
typedef struct _ChannelInfo
{
  MagickBooleanType
    supported;    /* channel type the coder knows how to decode */

  PixelChannel
    channel;      /* target MagickCore pixel channel */

  size_t
    size;         /* channel data length in bytes — presumably the compressed
                     length from the layer record; confirm against the reader */
} ChannelInfo;
/*
  A layer's opacity (user) mask.
*/
typedef struct _MaskInfo
{
  Image
    *image;       /* decoded mask pixels */

  RectangleInfo
    page;         /* mask placement and extent */

  unsigned char
    background,   /* mask background value from the layer record */
    flags;        /* mask flags from the layer record */
} MaskInfo;
/*
  Everything parsed from one layer record of the layer-and-mask section.
*/
typedef struct _LayerInfo
{
  ChannelInfo
    channel_info[MaxPSDChannels];  /* per-channel table (up to MaxPSDChannels) */

  char
    blendkey[4];  /* 4-byte blend-mode key, e.g. "norm" */

  Image
    *image;       /* decoded layer pixels */

  MaskInfo
    mask;         /* optional opacity mask */

  Quantum
    opacity;      /* layer opacity, already scaled to Quantum range */

  RectangleInfo
    page;         /* layer placement and extent */

  size_t
    offset_x,     /* NOTE(review): not obviously used in this chunk — confirm */
    offset_y;

  unsigned char
    clipping,     /* clipping flag from the layer record */
    flags,        /* layer flags (visibility bit etc.) */
    name[257],    /* Pascal-style layer name, NUL-terminated here */
    visible;      /* derived visibility state */

  unsigned short
    channels;     /* number of channels in channel_info */

  StringInfo
    *info;        /* additional layer information blocks, if preserved */
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  /*
    A PSD stream begins with the 4-byte signature "8BPS".
  */
  if (length < 4)
    return(MagickFalse);
  return(LocaleNCompare((const char *) magick,"8BPS",4) == 0 ? MagickTrue :
    MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Map a MagickCore composite operator onto the 4-byte PSD blend-mode key.
  The key is written to the file as a big-endian 4-character code, so for
  LSB-endian output the reversed spelling is returned (e.g. "norm" -> "mron").
  Operators with no PSD equivalent fall through to "norm" (normal).
*/
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
  switch (image->compose)
  {
    case ColorBurnCompositeOp:
      return(image->endian == LSBEndian ? "vidi" : "idiv");
    case ColorDodgeCompositeOp:
      return(image->endian == LSBEndian ? " vid" : "div ");
    case ColorizeCompositeOp:
      return(image->endian == LSBEndian ? "rloc" : "colr");
    case DarkenCompositeOp:
      return(image->endian == LSBEndian ? "krad" : "dark");
    case DifferenceCompositeOp:
      return(image->endian == LSBEndian ? "ffid" : "diff");
    case DissolveCompositeOp:
      return(image->endian == LSBEndian ? "ssid" : "diss");
    case ExclusionCompositeOp:
      return(image->endian == LSBEndian ? "dums" : "smud");
    case HardLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLh" : "hLit");
    case HardMixCompositeOp:
      return(image->endian == LSBEndian ? "xiMh" : "hMix");
    case HueCompositeOp:
      return(image->endian == LSBEndian ? " euh" : "hue ");
    case LightenCompositeOp:
      return(image->endian == LSBEndian ? "etil" : "lite");
    case LinearBurnCompositeOp:
      return(image->endian == LSBEndian ? "nrbl" : "lbrn");
    case LinearDodgeCompositeOp:
      return(image->endian == LSBEndian ? "gddl" : "lddg");
    case LinearLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLl" : "lLit");
    case LuminizeCompositeOp:
      return(image->endian == LSBEndian ? " mul" : "lum ");
    case MultiplyCompositeOp:
      return(image->endian == LSBEndian ? " lum" : "mul ");
    case OverlayCompositeOp:
      return(image->endian == LSBEndian ? "revo" : "over");
    case PinLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLp" : "pLit");
    case SaturateCompositeOp:
      return(image->endian == LSBEndian ? " tas" : "sat ");
    case ScreenCompositeOp:
      return(image->endian == LSBEndian ? "nrcs" : "scrn");
    case SoftLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLs" : "sLit");
    case VividLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLv" : "vLit");
    case OverCompositeOp:
    default:
      return(image->endian == LSBEndian ? "mron" : "norm");
  }
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
/*
  Revert Photoshop's blending of semi-transparent pixels with white (see the
  note above).  Only applies to sRGB images with blended alpha; disabled when
  the "psd:alpha-unblend" option is set to false.  Returns MagickFalse if the
  pixel cache could not be read or synced.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
  Image *image,ExceptionInfo* exception)
{
  const char
    *option;

  MagickBooleanType
    status;

  ssize_t
    y;

  if ((image->alpha_trait != BlendPixelTrait) ||
      (image->colorspace != sRGBColorspace))
    return(MagickTrue);
  option=GetImageOption(image_info,"psd:alpha-unblend");
  if (IsStringFalse(option) != MagickFalse)
    return(MagickTrue);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /* a failed row elsewhere aborts the remaining rows */
    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      ssize_t
        i;

      gamma=QuantumScale*GetPixelAlpha(image, q);
      /* fully transparent or fully opaque pixels need no correction */
      if (gamma != 0.0 && gamma != 1.0)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            /* undo c' = c*gamma + (1-gamma)*white for each color channel */
            if (channel != AlphaPixelChannel)
              q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
          }
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
/*
  Map a PSD compression tag onto the closest MagickCore compression type.
*/
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  if (compression == RLE)
    return RLECompression;
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return ZipCompression;
  return NoCompression;
}
/*
  Multiply every pixel's alpha by the layer opacity (or divide it back out
  when revert is true, as done before writing).  A fully opaque layer is a
  no-op.  Returns MagickFalse on pixel-cache failure.
*/
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
  MagickBooleanType revert,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying layer opacity %.20g", (double) opacity);
  if (opacity == OpaqueAlpha)
    return(MagickTrue);
  /* ensure an alpha channel exists before scaling it */
  if (image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (revert == MagickFalse)
        SetPixelAlpha(image,ClampToQuantum(QuantumScale*
          GetPixelAlpha(image,q)*opacity),q);
      else if (opacity > 0)   /* avoid division by zero on revert */
        SetPixelAlpha(image,ClampToQuantum((double) QuantumRange*
          GetPixelAlpha(image,q)/(MagickRealType) opacity),q);
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
/*
  Modulate the image's alpha channel by a layer mask: the mask is composited
  onto a canvas of the mask background color covering the whole image, then
  each pixel's alpha is scaled by the canvas intensity (or divided back out
  when revert is true).  No-op for images without alpha.
*/
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
  Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
  Image
    *complete_mask;

  MagickBooleanType
    status;

  PixelInfo
    color;

  ssize_t
    y;

  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying opacity mask");
  /* build a full-size canvas holding the mask at its page offset */
  complete_mask=CloneImage(image,0,0,MagickTrue,exception);
  if (complete_mask == (Image *) NULL)
    return(MagickFalse);
  complete_mask->alpha_trait=BlendPixelTrait;
  GetPixelInfo(complete_mask,&color);
  color.red=(MagickRealType) background;
  (void) SetImageColor(complete_mask,&color,exception);
  status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
    mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
  if (status == MagickFalse)
    {
      complete_mask=DestroyImage(complete_mask);
      return(status);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    Quantum
      *p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
    if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        alpha,
        intensity;

      alpha=(MagickRealType) GetPixelAlpha(image,q);
      intensity=GetPixelIntensity(complete_mask,p);
      if (revert == MagickFalse)
        SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
      else if (intensity > 0)   /* avoid division by zero on revert */
        SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
      q+=GetPixelChannels(image);
      p+=GetPixelChannels(complete_mask);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  complete_mask=DestroyImage(complete_mask);
  return(status);
}
/*
  Stash a layer's opacity mask in the image registry under a random key so
  the writer can restore it: the mask background byte is stored at key[8]
  and the key is attached to the layer image as the "psd:opacity-mask"
  artifact.

  Fix: the key buffer was requested with GetRandomKey(random_info,2+1) — only
  3 bytes — while key[8] and key[9] are written below.  Those stores landed
  outside the requested key and past the 3-byte string's terminator, so the
  registry key silently lost the background byte.  Request 9+1 bytes so
  indices 0..9 are all inside the allocation.
*/
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;

  RandomInfo
    *random_info;

  StringInfo
    *key_info;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  key_info=GetRandomKey(random_info,9+1);
  key=(char *) GetStringInfoDatum(key_info);
  key[8]=(char) layer_info->mask.background;
  key[9]='\0';
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
/*
  Decode a PackBits (RLE) compressed scanline.  A control byte c means:
  c < 128: copy the next c+1 source bytes literally; c == 128: no-op;
  c > 128: repeat the next source byte 257-c times.  Sub-byte depths (1, 2,
  4) are expanded to one output byte per sample; depth 1 additionally maps
  set bits to 0 and clear bits to 255.  Returns the number of output bytes
  produced (decoding stops early if either buffer is exhausted).
*/
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
  const unsigned char *compact_pixels,const ssize_t depth,
  const size_t number_pixels,unsigned char *pixels)
{
#define CheckNumberCompactPixels \
  if (packets == 0) \
    return(i); \
  packets--

#define CheckNumberPixels(count) \
  if (((ssize_t) i + count) > (ssize_t) number_pixels) \
    return(i); \
  i+=count

  int
    pixel;

  ssize_t
    i,
    k;

  size_t
    run_length;

  ssize_t
    packets;

  packets=(ssize_t) number_compact_pixels;
  i=0;
  while ((packets > 1) && (i < (ssize_t) number_pixels))
  {
    packets--;
    run_length=(size_t) (*compact_pixels++);
    if (run_length == 128)
      continue;   /* 128 is a no-op control byte */
    if (run_length > 128)
      {
        /*
          Replicate run: one source byte repeated 257-run_length times.
        */
        run_length=256-run_length+1;
        CheckNumberCompactPixels;
        pixel=(*compact_pixels++);
        for (k=0; k < (ssize_t) run_length; k++)
        {
          switch (depth)
          {
            case 1:
            {
              CheckNumberPixels(8);
              *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 0) & 0x01 ? 0U : 255U;
              break;
            }
            case 2:
            {
              CheckNumberPixels(4);
              *pixels++=(unsigned char) ((pixel >> 6) & 0x03);
              *pixels++=(unsigned char) ((pixel >> 4) & 0x03);
              *pixels++=(unsigned char) ((pixel >> 2) & 0x03);
              *pixels++=(unsigned char) ((pixel & 0x03) & 0x03);
              break;
            }
            case 4:
            {
              CheckNumberPixels(2);
              *pixels++=(unsigned char) ((pixel >> 4) & 0xff);
              *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff);
              break;
            }
            default:
            {
              CheckNumberPixels(1);
              *pixels++=(unsigned char) pixel;
              break;
            }
          }
        }
        continue;
      }
    /*
      Literal run of run_length+1 source bytes.
    */
    run_length++;
    for (k=0; k < (ssize_t) run_length; k++)
    {
      CheckNumberCompactPixels;
      switch (depth)
      {
        case 1:
        {
          CheckNumberPixels(8);
          *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
          break;
        }
        case 2:
        {
          CheckNumberPixels(4);
          *pixels++=(*compact_pixels >> 6) & 0x03;
          *pixels++=(*compact_pixels >> 4) & 0x03;
          *pixels++=(*compact_pixels >> 2) & 0x03;
          *pixels++=(*compact_pixels & 0x03) & 0x03;
          break;
        }
        case 4:
        {
          CheckNumberPixels(2);
          *pixels++=(*compact_pixels >> 4) & 0xff;
          *pixels++=(*compact_pixels & 0x0f) & 0xff;
          break;
        }
        default:
        {
          CheckNumberPixels(1);
          *pixels++=(*compact_pixels);
          break;
        }
      }
      compact_pixels++;
    }
  }
  return(i);
}
/*
  Release every per-layer image, mask image and additional-info blob, then
  the layer array itself.  Always returns NULL for assignment convenience.
*/
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  ssize_t
    j;

  for (j=0; j < number_layers; j++)
  {
    if (layer_info[j].image != (Image *) NULL)
      layer_info[j].image=DestroyImage(layer_info[j].image);
    if (layer_info[j].mask.image != (Image *) NULL)
      layer_info[j].mask.image=DestroyImage(layer_info[j].mask.image);
    if (layer_info[j].info != (StringInfo *) NULL)
      layer_info[j].info=DestroyStringInfo(layer_info[j].info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
/*
  Bytes per channel sample: colormapped images with more than 256 entries
  use two-byte indexes; otherwise the size follows the image depth.
*/
static inline size_t GetPSDPacketSize(const Image *image)
{
  if ((image->storage_class == PseudoClass) && (image->colors > 256))
    return(2);
  if (image->depth > 16)
    return(4);
  if (image->depth > 8)
    return(2);
  return(1);
}
/*
  Read a length field from the blob: 4 bytes for PSD (version 1), 8 bytes
  for PSB (version 2).
*/
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  return(psd_info->version == 1 ? (MagickSizeType) ReadBlobLong(image) :
    (MagickSizeType) ReadBlobLongLong(image));
}
/*
  Bytes per scanline: 1-bit images pack eight samples per byte; all other
  depths store one packet per column.
*/
static inline size_t GetPSDRowSize(Image *image)
{
  size_t
    columns;

  columns=(image->depth == 1) ? (image->columns+7)/8 : image->columns;
  return(columns*GetPSDPacketSize(image));
}
/*
  Human-readable name of a PSD color mode, for logging.
*/
static const char *ModeToString(PSDImageType type)
{
  static const struct
  {
    PSDImageType
      mode;

    const char
      *name;
  } modes[] =
  {
    { BitmapMode, "Bitmap" },
    { GrayscaleMode, "Grayscale" },
    { IndexedMode, "Indexed" },
    { RGBMode, "RGB" },
    { CMYKMode, "CMYK" },
    { MultichannelMode, "Multichannel" },
    { DuotoneMode, "Duotone" },
    { LabMode, "L*A*B" }
  };

  size_t
    j;

  for (j=0; j < sizeof(modes)/sizeof(modes[0]); j++)
    if (modes[j].mode == type)
      return(modes[j].name);
  return("unknown");
}
/*
  PSD stores CMYK samples inverted: negate every channel except alpha,
  restoring the caller's channel mask afterwards.
*/
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;

  MagickBooleanType
    status;

  channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~
    AlphaChannel));
  status=NegateImage(image,MagickFalse,exception);
  (void) SetImageChannelMask(image,channel_mask);
  return(status);
}
/*
  Walk the "8BIM" image resource blocks: record the whole section as an
  "8bim" profile, and additionally interpret resolution info (id 0x03ed)
  and the merged-image flag (id 0x0421).  Returns the profile, or NULL if
  the section is too short; parsing stops at the first malformed block.
*/
static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image,
  const unsigned char *blocks,size_t length)
{
  const unsigned char
    *p;

  ssize_t
    offset;

  StringInfo
    *profile;

  unsigned char
    name_length;

  unsigned int
    count;

  unsigned short
    id,
    short_sans;

  if (length < 16)
    return((StringInfo *) NULL);
  profile=BlobToStringInfo((const unsigned char *) NULL,length);
  SetStringInfoDatum(profile,blocks);
  SetStringInfoName(profile,"8bim");
  for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
  {
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=PushShortPixel(MSBEndian,p,&id);
    /* Pascal-style name, padded to an even total length */
    p=PushCharPixel(p,&name_length);
    if ((name_length % 2) == 0)
      name_length++;
    p+=name_length;
    if (p > (blocks+length-4))
      break;
    p=PushLongPixel(MSBEndian,p,&count);
    offset=(ssize_t) count;
    /* reject blocks whose declared size runs outside the section */
    if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned short
          resolution;

        /*
          Resolution info.
          NOTE(review): only the first 16 bytes of this block are consumed;
          any remainder is treated as the start of the next block.
        */
        if (offset < 16)
          break;
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.x=(double) resolution;
        (void) FormatImageProperty(image,"tiff:XResolution","%*g",
          GetMagickPrecision(),image->resolution.x);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.y=(double) resolution;
        (void) FormatImageProperty(image,"tiff:YResolution","%*g",
          GetMagickPrecision(),image->resolution.y);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /* version info: byte 4 indicates whether a merged image is present */
        if ((offset > 4) && (*(p+4) == 0))
          psd_info->has_merged_image=MagickFalse;
        p+=offset;
        break;
      }
      default:
      {
        p+=offset;
        break;
      }
    }
    /* blocks are padded to even offsets */
    if ((offset & 0x01) != 0)
      p++;
  }
  return(profile);
}
/*
  Map a 4-character PSD blend-mode key (big-endian spelling, e.g. "norm",
  "mul ") onto the corresponding MagickCore composite operator.  Unknown or
  missing keys default to OverCompositeOp.
*/
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
  static const struct
  {
    const char
      *key;

    CompositeOperator
      compose;
  } blend_modes[] =
  {
    { "norm", OverCompositeOp },
    { "mul ", MultiplyCompositeOp },
    { "diss", DissolveCompositeOp },
    { "diff", DifferenceCompositeOp },
    { "dark", DarkenCompositeOp },
    { "lite", LightenCompositeOp },
    { "hue ", HueCompositeOp },
    { "sat ", SaturateCompositeOp },
    { "colr", ColorizeCompositeOp },
    { "lum ", LuminizeCompositeOp },
    { "scrn", ScreenCompositeOp },
    { "over", OverlayCompositeOp },
    { "hLit", HardLightCompositeOp },
    { "sLit", SoftLightCompositeOp },
    { "smud", ExclusionCompositeOp },
    { "div ", ColorDodgeCompositeOp },
    { "idiv", ColorBurnCompositeOp },
    { "lbrn", LinearBurnCompositeOp },
    { "lddg", LinearDodgeCompositeOp },
    { "lLit", LinearLightCompositeOp },
    { "vLit", VividLightCompositeOp },
    { "pLit", PinLightCompositeOp },
    { "hMix", HardMixCompositeOp }
  };

  size_t
    j;

  if (mode == (const char *) NULL)
    return(OverCompositeOp);
  for (j=0; j < sizeof(blend_modes)/sizeof(blend_modes[0]); j++)
    if (LocaleNCompare(mode,blend_modes[j].key,4) == 0)
      return(blend_modes[j].compose);
  return(OverCompositeOp);
}
/*
  Read a fixed-length string from the blob.  When the blob is not MSB
  endian the bytes arrive reversed, so mirror the buffer in place.
  Returns the number of bytes actually read.
*/
static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length)
{
  ssize_t
    count;

  count=ReadBlob(image,length,(unsigned char *) p);
  if ((count == (ssize_t) length) && (image->endian != MSBEndian))
    {
      char
        *q;

      for (q=p+length-1; p < q; p++, q--)
      {
        char
          swap;

        swap=(*p);
        *p=(*q);
        *q=swap;
      }
    }
  return(count);
}
/*
  Store one decoded sample into the pixel at q.  For colormapped images the
  gray channel sets the colormap index (scaled down for 8-bit packets and
  clamped to the colormap range) and the pixel is refreshed from the
  colormap entry; an alpha sample updates that colormap entry's alpha first.
  For direct-class images the sample goes straight to the target channel.
*/
static inline void SetPSDPixel(Image *image,const PixelChannel channel,
  const size_t packet_size,const Quantum pixel,Quantum *q,
  ExceptionInfo *exception)
{
  if (image->storage_class == PseudoClass)
    {
      PixelInfo
        *color;

      if (channel == GrayPixelChannel)
        {
          Quantum
            index;

          index=pixel;
          if (packet_size == 1)
            index=(Quantum) ScaleQuantumToChar(index);
          index=(Quantum) ConstrainColormapIndex(image,(ssize_t) index,
            exception);
          SetPixelIndex(image,index,q);
        }
      /* for non-gray channels this reads the index already stored at q */
      color=image->colormap+(ssize_t) GetPixelIndex(image,q);
      if (channel == AlphaPixelChannel)
        color->alpha=(MagickRealType) pixel;
      SetPixelViaPixelInfo(image,color,q);
    }
  else
    SetPixelChannel(image,channel,pixel,q);
}
/*
  Copy one decoded scanline into the pixel cache for the given channel.
  Samples are read at the packet size implied by the image (1, 2, or 4
  bytes, big-endian); 1-bit images expand each byte into up to eight
  pixels with set bits mapped to black (0) and clear bits to QuantumRange.
  Returns the result of syncing the pixel cache.
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image,const ssize_t row,
  const PixelChannel channel,const unsigned char *pixels,
  ExceptionInfo *exception)
{
  Quantum
    pixel;

  const unsigned char
    *p;

  Quantum
    *q;

  ssize_t
    x;

  size_t
    packet_size;

  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (Quantum *) NULL)
    return MagickFalse;
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      if (packet_size == 2)
        {
          unsigned short
            nibble;

          p=PushShortPixel(MSBEndian,p,&nibble);
          pixel=ScaleShortToQuantum(nibble);
        }
      else
        {
          /* 32-bit channels are stored as big-endian floats in [0, 1] */
          MagickFloatType
            nibble;

          p=PushFloatPixel(MSBEndian,p,&nibble);
          pixel=ClampToQuantum(((MagickRealType) QuantumRange)*nibble);
        }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channel,packet_size,pixel,q,exception);
        q+=GetPixelChannels(image);
      }
    else
      {
        /* depth 1: expand the byte in 'pixel' into up to 8 pixels */
        ssize_t
          bit,
          number_bits;

        number_bits=(ssize_t) image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit = 0; bit < (ssize_t) number_bits; bit++)
        {
          SetPSDPixel(image,channel,packet_size,(((unsigned char) pixel)
            & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q,exception);
          q+=GetPixelChannels(image);
          x++;
        }
        /* compensate for the extra x++ of the enclosing for loop */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}
/*
  Read an uncompressed channel: one blob read per scanline, pushed into the
  pixel cache via ReadPSDChannelPixels.  Returns MagickFalse on a short
  read or pixel-cache failure.
*/
static MagickBooleanType ReadPSDChannelRaw(Image *image,const PixelChannel channel,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(pixels,0,row_size*sizeof(*pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    /* assume failure until the row is fully read and stored */
    status=MagickFalse;
    count=ReadBlob(image,row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,y,channel,pixels,exception);
    if (status == MagickFalse)
      break;
  }
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
/*
  Read the per-scanline byte counts that precede RLE channel data: 16-bit
  values in PSD (version 1), 32-bit in PSB.  Returns NULL on allocation
  failure.
*/
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    j;

  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return sizes;
  for (j=0; j < (ssize_t) size; j++)
  {
    if (psd_info->version == 1)
      sizes[j]=(MagickOffsetType) ReadBlobShort(image);
    else
      sizes[j]=(MagickOffsetType) ReadBlobLong(image);
  }
  return sizes;
}
/*
  Read an RLE (PackBits) compressed channel: for each scanline, read
  sizes[y] compressed bytes, decode them with DecodePSDPixels, and push the
  row into the pixel cache.  The compact buffer is sized to the largest
  per-row count, rejected if implausibly larger than a decoded row.
*/
static MagickBooleanType ReadPSDChannelRLE(Image *image,
  const PixelChannel channel,MagickOffsetType *sizes,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* size the compressed buffer to the largest scanline */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  if (length > (row_size+2048)) /* arbitrary number */
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    /* assume failure until the row is read, decoded and stored */
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /*
      For depth 1 a sentinel depth (123456) forces DecodePSDPixels to emit
      raw bytes; bit expansion happens later in ReadPSDChannelPixels.
    */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,y,channel,pixels,exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
/*
  Undo horizontal delta prediction on 8-bit samples: each byte in a row is
  stored as the difference from its left neighbor, so accumulate
  left-to-right, one row (row_size bytes) at a time until count bytes are
  consumed.
*/
static void Unpredict8Bit(const Image *image,unsigned char *pixels,
  const size_t count,const size_t row_size)
{
  unsigned char
    *q;

  size_t
    n,
    remaining;

  q=pixels;
  for (remaining=count; remaining > 0; remaining-=row_size)
  {
    for (n=image->columns-1; n != 0; n--)
    {
      q[1]=(unsigned char) (q[1]+q[0]);
      q++;
    }
    q++;
  }
}
/*
  Undo horizontal delta prediction on big-endian 16-bit samples: add the
  previous sample to each one in turn, propagating the carry from the low
  byte into the high byte.
*/
static void Unpredict16Bit(const Image *image,unsigned char *pixels,
  const size_t count,const size_t row_size)
{
  unsigned char
    *q;

  size_t
    n,
    remaining;

  q=pixels;
  for (remaining=count; remaining > 0; remaining-=row_size)
  {
    for (n=image->columns-1; n != 0; n--)
    {
      q[2]=(unsigned char) (q[2]+q[0]+((q[1]+q[3]) >> 8));
      q[3]=(unsigned char) (q[3]+q[1]);
      q+=2;
    }
    q+=2;
  }
}
static void Unpredict32Bit(const Image *image,unsigned char *pixels,
  unsigned char *output_pixels,const size_t row_size)
{
  /*
    Undo delta prediction on 32-bit samples and de-interleave them.  Each
    source row stores the four bytes of every sample in planar order (all
    byte 0s, then all byte 1s, ...); after summing the deltas the bytes are
    gathered into `output_pixels` in interleaved big-endian sample order.
    Assumes row_size == 4*image->columns — TODO(review) confirm at caller.
  */
  unsigned char
    *p,
    *q;

  ssize_t
    y;

  size_t
    offset1,
    offset2,
    offset3,
    remaining;

  unsigned char
    *start;

  /* Byte-plane offsets within one predicted row. */
  offset1=image->columns;
  offset2=2*offset1;
  offset3=3*offset1;
  p=pixels;
  q=output_pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    /* First pass: running-sum the whole row of deltas in place. */
    start=p;
    remaining=row_size;
    while (--remaining)
    {
      *(p+1)+=*p;
      p++;
    }
    /* Second pass: gather one byte from each plane per output sample. */
    p=start;
    remaining=image->columns;
    while (remaining--)
    {
      *(q++)=*p;
      *(q++)=*(p+offset1);
      *(q++)=*(p+offset2);
      *(q++)=*(p+offset3);
      p++;
    }
    p=start+row_size;
  }
}
static MagickBooleanType ReadPSDChannelZip(Image *image,
  const PixelChannel channel,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  /*
    Read a ZIP compressed channel: inflate `compact_size` bytes from the
    blob into a full-resolution pixel buffer, optionally undo the per-depth
    delta prediction, then hand the rows to ReadPSDChannelPixels.  Returns
    MagickTrue on success; throws on allocation/short-read failures.
  */
  MagickBooleanType
    status;

  unsigned char
    *p;

  size_t
    count,
    packet_size,
    row_size;

  ssize_t
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  z_stream
    stream;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  if ((MagickSizeType) compact_size > GetBlobSize(image))
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      if (packet_size == 1)
        Unpredict8Bit(image,pixels,count,row_size);
      else if (packet_size == 2)
        Unpredict16Bit(image,pixels,count,row_size);
      else if (packet_size == 4)
        {
          unsigned char
            *output_pixels;

          output_pixels=(unsigned char *) AcquireQuantumMemory(count,
            sizeof(*output_pixels));
          /*
            Check the buffer just allocated, not the already-validated
            `pixels`: the previous code tested the wrong pointer, so a
            failed allocation here was dereferenced by Unpredict32Bit.
          */
          if (output_pixels == (unsigned char *) NULL)
            {
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
              pixels=(unsigned char *) RelinquishMagickMemory(pixels);
              ThrowBinaryException(ResourceLimitError,
                "MemoryAllocationFailed",image->filename);
            }
          Unpredict32Bit(image,pixels,output_pixels,row_size);
          pixels=(unsigned char *) RelinquishMagickMemory(pixels);
          pixels=output_pixels;
        }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,y,channel,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel_index,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  /*
    Read one channel of a layer with the given compression.  Unsupported
    channels are skipped.  A ReadMaskPixelChannel is decoded into a
    separate grayscale mask image stored in layer_info->mask.image; all
    other channels decode into `image` itself.  Always seeks past the
    channel's recorded size before returning, regardless of decode status.
  */
  Image
    *channel_image,
    *mask;

  MagickOffsetType
    end_offset,
    offset;

  MagickBooleanType
    status;

  PixelChannel
    channel;

  /*
    The caller (ReadPSDLayer) already consumed the 2-byte compression
    marker, so the remaining channel payload is size-2 bytes.
  */
  end_offset=(MagickOffsetType) layer_info->channel_info[channel_index].size-2;
  if (layer_info->channel_info[channel_index].supported == MagickFalse)
    {
      (void) SeekBlob(image,end_offset,SEEK_CUR);
      return(MagickTrue);
    }
  channel_image=image;
  channel=layer_info->channel_info[channel_index].channel;
  mask=(Image *) NULL;
  if (channel == ReadMaskPixelChannel)
    {
      const char
        *option;

      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
          (IsStringTrue(option) == MagickFalse)) ||
          (layer_info->mask.page.width < 1) ||
          (layer_info->mask.page.height < 1))
        {
          (void) SeekBlob(image,end_offset,SEEK_CUR);
          return(MagickTrue);
        }
      /* Decode the mask into its own grayscale image of the mask's page. */
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          (void) ResetImagePixels(mask,exception);
          (void) SetImageType(mask,GrayscaleType,exception);
          channel_image=mask;
          channel=GrayPixelChannel;
        }
    }
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,channel,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;

      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,channel,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
    break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,channel,compression,
        (const size_t) end_offset,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /* Re-sync to the end of the channel even after a partial decode. */
  (void) SeekBlob(image,offset+end_offset,SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        (void) DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  if (mask != (Image *) NULL)
    {
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }
  return(status);
}
static MagickBooleanType GetPixelChannelFromPsdIndex(const PSDInfo *psd_info,
  ssize_t index,PixelChannel *channel)
{
  /*
    Map a PSD channel index to a pixel channel for the current color mode.
    The channel directly after the mode's color channels is the alpha
    channel (remapped to -1); later channels become meta channels.  An
    index of -2 denotes the layer read mask.  Returns MagickFalse when the
    resulting index is out of range.
  */
  const size_t
    mode=psd_info->mode;

  *channel=RedPixelChannel;
  if ((mode == BitmapMode) || (mode == IndexedMode) ||
      (mode == GrayscaleMode))
    {
      if (index == 1)
        index=(-1);
      else
        if (index > 1)
          index=StartMetaPixelChannel+index-2;
    }
  else
    if ((mode == LabMode) || (mode == MultichannelMode) || (mode == RGBMode))
      {
        if (index == 3)
          index=(-1);
        else
          if (index > 3)
            index=StartMetaPixelChannel+index-4;
      }
    else
      if (mode == CMYKMode)
        {
          if (index == 4)
            index=(-1);
          else
            if (index > 4)
              index=StartMetaPixelChannel+index-5;
        }
  if ((index < -2) || (index >= MaxPixelChannels))
    return(MagickFalse);
  switch (index)
  {
    case -1:
      *channel=AlphaPixelChannel;
      break;
    case -2:
      *channel=ReadMaskPixelChannel;
      break;
    default:
      *channel=(PixelChannel) index;
      break;
  }
  return(MagickTrue);
}
static void SetPsdMetaChannels(Image *image,const PSDInfo *psd_info,
  const unsigned short channels,ExceptionInfo *exception)
{
  /*
    Register any channels beyond the color channels (and the alpha channel,
    when present) as meta channels on the image.
  */
  ssize_t
    extra;

  extra=(ssize_t) channels-psd_info->min_channels;
  if (image->alpha_trait == BlendPixelTrait)
    extra--;
  if (extra <= 0)
    return;
  (void) SetPixelMetaChannels(image,(size_t) extra,exception);
}
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  /*
    Decode one layer: set up its image (compose operator, hidden artifacts,
    meta channels), read every channel, then apply the layer opacity,
    CMYK negation and any opacity mask.  Returns MagickFalse on the first
    channel that fails to decode.
  */
  char
    message[MagickPathExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image,exception);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  /* Hidden layers must not take part in compositing. */
  if (layer_info->visible == MagickFalse)
    layer_info->image->compose=NoCompositeOp;
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
    exception);
  SetPsdMetaChannels(layer_info->image,psd_info,layer_info->channels,exception);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    /* Each channel carries its own 2-byte compression marker. */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    layer_info->image->compression=ConvertPSDCompression(compression);
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateCMYK(layer_info->image,exception);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  /*
    Validate that a layer supplies every color channel its color mode
    requires.  Bits for the required channels are set up front and cleared
    as each is found; the layer is acceptable when nothing required remains
    (optionally with an extra alpha channel present).
  */
  int
    required;

  ssize_t
    i;

  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  required=RedChannel;
  if (psd_info->min_channels >= 3)
    required|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    required|=BlackChannel;
  for (i=0; i < (ssize_t) layer_info->channels; i++)
  {
    PixelChannel
      channel;

    if (layer_info->channel_info[i].supported == MagickFalse)
      continue;
    channel=layer_info->channel_info[i].channel;
    /* Indexed images must store their index data in the first channel. */
    if ((i == 0) && (psd_info->mode == IndexedMode) &&
        (channel != RedPixelChannel))
      return(MagickFalse);
    switch (channel)
    {
      case AlphaPixelChannel:
        required|=AlphaChannel;
        break;
      case RedPixelChannel:
        required&=~RedChannel;
        break;
      case GreenPixelChannel:
        required&=~GreenChannel;
        break;
      case BluePixelChannel:
        required&=~BlueChannel;
        break;
      case BlackPixelChannel:
        required&=~BlackChannel;
        break;
      default:
        break;
    }
  }
  if (required == 0)
    return(MagickTrue);
  if ((required == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}
static void AttachPSDLayers(Image *image,LayerInfo *layer_info,
  ssize_t number_layers)
{
  /*
    Compact away layers whose image was never allocated, stitch the
    remaining layer images into the image list directly after the base
    image, and release the layer info array.
  */
  ssize_t
    i,
    j;

  /* Shift out entries with a missing image, shrinking the count. */
  for (i=0; i < number_layers; i++)
    if (layer_info[i].image == (Image *) NULL)
      {
        for (j=i; j < number_layers-1; j++)
          layer_info[j]=layer_info[j+1];
        number_layers--;
        i--;
      }
  if (number_layers != 0)
    {
      /* Doubly link the survivors and copy their page geometry. */
      for (i=0; i < number_layers; i++)
      {
        if (i > 0)
          layer_info[i].image->previous=layer_info[i-1].image;
        if (i < (number_layers-1))
          layer_info[i].image->next=layer_info[i+1].image;
        layer_info[i].image->page=layer_info[i].page;
      }
      image->next=layer_info[0].image;
      layer_info[0].image->previous=image;
    }
  layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info,
  const ImageInfo *image_info,const size_t index)
{
  /*
    Report whether layer `index` lies outside the requested scene range.
    Layers are only skippable when the file has a merged image to fall
    back on and the caller actually restricted the scene range.
  */
  if ((psd_info->has_merged_image == MagickFalse) ||
      (image_info->number_scenes == 0))
    return(MagickFalse);
  if ((index >= image_info->scene) &&
      (index <= image_info->scene+image_info->number_scenes-1))
    return(MagickFalse);
  return(MagickTrue);
}
static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image)
{
  /*
    The number of layers cannot be used to determine if the merged image
    contains an alpha channel, so infer one whenever the file carries more
    channels than the color mode itself needs.
  */
  size_t
    color_channels;

  switch (psd_info->mode)
  {
    case GrayscaleMode:
      color_channels=1;
      break;
    case RGBMode:
      color_channels=3;
      break;
    case CMYKMode:
      color_channels=4;
      break;
    default:
      return;
  }
  if (psd_info->channels > color_channels)
    image->alpha_trait=BlendPixelTrait;
}
static void ParseAdditionalInfo(LayerInfo *layer_info)
{
  /*
    Walk the layer's additional-info blocks (4-byte signature, 4-byte key,
    4-byte big-endian payload size, then payload) looking for the Unicode
    layer name ("luni") and copy it into layer_info->name.  Only ASCII
    code points are supported.
  */
  char
    key[5];

  size_t
    remaining_length;

  unsigned char
    *p;

  unsigned int
    size;

  p=GetStringInfoDatum(layer_info->info);
  remaining_length=GetStringInfoLength(layer_info->info);
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      break;
    if (LocaleNCompare(key,"luni",sizeof(key)) == 0)
      {
        unsigned char
          *name;

        unsigned int
          length;

        /*
          The payload must at least hold the 4-byte character count;
          otherwise `size-4` below underflows (unsigned), the bound check
          is bypassed, and the length field is read past the payload.
        */
        if (size < 4)
          break;
        length=(unsigned int) (*p++) << 24;
        length|=(unsigned int) (*p++) << 16;
        length|=(unsigned int) (*p++) << 8;
        length|=(unsigned int) (*p++);
        /* UTF-16: two payload bytes per character. */
        if (length * 2 > size - 4)
          break;
        if (sizeof(layer_info->name) <= length)
          break;
        name=layer_info->name;
        while (length > 0)
        {
          /* Only ASCII strings are supported */
          if (*p++ != '\0')
            break;
          *name++=*p++;
          length--;
        }
        if (length == 0)
          *name='\0';
        break;
      }
    else
      p+=size;
    remaining_length-=(size_t) size;
  }
}
static MagickSizeType GetLayerInfoSize(const PSDInfo *psd_info,Image *image)
{
  /*
    Return the size of the layer info section.  When the section length
    read from the blob is zero, probe the following 8BIM resource keys:
    "Mt16"/"Mt32"/"Mtrn" (which imply transparency, so alpha is enabled)
    and then "Lr16"/"Lr32" which carry the 16/32-bit layer data whose size
    is returned instead.  Returns 0 when no layer data is found.
  */
  char
    type[4];

  MagickSizeType
    size;

  ssize_t
    count;

  size=GetPSDSize(psd_info,image);
  if (size != 0)
    return(size);
  (void) ReadBlobLong(image);
  count=ReadPSDString(image,type,4);
  if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
    return(0);
  count=ReadPSDString(image,type,4);
  if ((count == 4) && ((LocaleNCompare(type,"Mt16",4) == 0) ||
      (LocaleNCompare(type,"Mt32",4) == 0) ||
      (LocaleNCompare(type,"Mtrn",4) == 0)))
    {
      /* NOTE(review): a non-zero size after a Mt* key aborts the probe. */
      size=GetPSDSize(psd_info,image);
      if (size != 0)
        return(0);
      image->alpha_trait=BlendPixelTrait;
      count=ReadPSDString(image,type,4);
      if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(0);
      count=ReadPSDString(image,type,4);
    }
  if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
      (LocaleNCompare(type,"Lr32",4) == 0)))
    size=GetPSDSize(psd_info,image);
  return(size);
}
static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  /*
    Parse the PSD layer info section: read each layer record (geometry,
    channel table, blend key, flags, mask and additional info), allocate
    an image per non-empty layer, decode the channel data and attach the
    resulting images to the image list.  When skip_layers is set, only the
    layer count is read (to detect the alpha channel) and the method
    returns without consuming the layer records.
  */
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  ssize_t
    count,
    index,
    i,
    j,
    number_layers;

  size=GetLayerInfoSize(psd_info,image);
  if (size == 0)
    {
      CheckMergedImageAlpha(psd_info,image);
      return(MagickTrue);
    }
  layer_info=(LayerInfo *) NULL;
  number_layers=(ssize_t) ReadBlobSignedShort(image);
  if (number_layers < 0)
    {
      /*
        The first alpha channel in the merged result contains the
        transparency data for the merged result.
      */
      number_layers=MagickAbsoluteValue(number_layers);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " negative layer count corrected for");
      image->alpha_trait=BlendPixelTrait;
    }
  /*
    We only need to know if the image has an alpha channel
  */
  if (skip_layers != MagickFalse)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " image contains %.20g layers",(double) number_layers);
  if (number_layers == 0)
    ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
      image->filename);
  layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
    sizeof(*layer_info));
  if (layer_info == (LayerInfo *) NULL)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " allocation of LayerInfo failed");
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
  /*
    First pass: read every layer record (no pixel data yet).
  */
  for (i=0; i < number_layers; i++)
  {
    ssize_t
      top,
      left,
      bottom,
      right;

    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading layer #%.20g",(double) i+1);
    top=(ssize_t) ReadBlobSignedLong(image);
    left=(ssize_t) ReadBlobSignedLong(image);
    bottom=(ssize_t) ReadBlobSignedLong(image);
    right=(ssize_t) ReadBlobSignedLong(image);
    if ((right < left) || (bottom < top))
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].page.y=top;
    layer_info[i].page.x=left;
    layer_info[i].page.width=(size_t) (right-left);
    layer_info[i].page.height=(size_t) (bottom-top);
    layer_info[i].channels=ReadBlobShort(image);
    if (layer_info[i].channels > MaxPSDChannels)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
          image->filename);
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
        (double) layer_info[i].page.x,(double) layer_info[i].page.y,
        (double) layer_info[i].page.height,(double)
        layer_info[i].page.width,(double) layer_info[i].channels);
    /* Channel table: 2-byte type index plus per-channel data size. */
    for (j=0; j < (ssize_t) layer_info[i].channels; j++)
    {
      layer_info[i].channel_info[j].supported=GetPixelChannelFromPsdIndex(
        psd_info,(ssize_t) ReadBlobSignedShort(image),
        &layer_info[i].channel_info[j].channel);
      layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
        image);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " channel[%.20g]: type=%.20g, size=%.20g",(double) j,
          (double) layer_info[i].channel_info[j].channel,
          (double) layer_info[i].channel_info[j].size);
    }
    if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadPSDString(image,type,4);
    if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer type was %.4s instead of 8BIM", type);
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadPSDString(image,layer_info[i].blendkey,4);
    if (count != 4)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
      ReadBlobByte(image));
    layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
    layer_info[i].flags=(unsigned char) ReadBlobByte(image);
    /* Flag bit 0x02 marks the layer as hidden. */
    layer_info[i].visible=!(layer_info[i].flags & 0x02);
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
        layer_info[i].blendkey,(double) layer_info[i].opacity,
        layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
        layer_info[i].visible ? "true" : "false");
    (void) ReadBlobByte(image); /* filler */
    size=ReadBlobLong(image);
    if (size != 0)
      {
        MagickSizeType
          combined_length,
          length;

        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer contains additional info");
        length=ReadBlobLong(image);
        combined_length=length+4;
        if (length != 0)
          {
            /*
              Layer mask info.
            */
            layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.height=(size_t)
              (ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
            layer_info[i].mask.page.width=(size_t) (
              ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
            layer_info[i].mask.background=(unsigned char) ReadBlobByte(
              image);
            layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
            /* Unless flag 0x01 is set, mask offsets are absolute; make
               them relative to the layer's page. */
            if (!(layer_info[i].mask.flags & 0x01))
              {
                layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                  layer_info[i].page.y;
                layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                  layer_info[i].page.x;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                (double) layer_info[i].mask.page.x,(double)
                layer_info[i].mask.page.y,(double)
                layer_info[i].mask.page.width,(double)
                layer_info[i].mask.page.height,(double) ((MagickOffsetType)
                length)-18);
            /*
              Skip over the rest of the layer mask information.
            */
            if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        length=ReadBlobLong(image);
        combined_length+=length+4;
        if (length != 0)
          {
            /*
              Layer blending ranges info.
            */
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer blending ranges: length=%.20g",(double)
                ((MagickOffsetType) length));
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /*
          Layer name.
        */
        length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
        combined_length+=length+1;
        if (length > 0)
          (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
        layer_info[i].name[length]='\0';
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer name: %s",layer_info[i].name);
        /* The Pascal-style name is padded to a multiple of 4 bytes. */
        if ((length % 4) != 0)
          {
            length=4-(length % 4);
            combined_length+=length;
            /* Skip over the padding of the layer name */
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /* Whatever remains of the extra-data block is the additional
           layer information (8BIM key/value blocks). */
        length=(MagickSizeType) size-combined_length;
        if (length > 0)
          {
            unsigned char
              *info;

            if (length > GetBlobSize(image))
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "InsufficientImageDataInFile",image->filename);
              }
            layer_info[i].info=AcquireStringInfo((const size_t) length);
            info=GetStringInfoDatum(layer_info[i].info);
            (void) ReadBlob(image,(const size_t) length,info);
            ParseAdditionalInfo(&layer_info[i]);
          }
      }
  }
  /*
    Second pass: allocate an image per non-empty layer.
  */
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer data is empty");
        if (layer_info[i].info != (StringInfo *) NULL)
          layer_info[i].info=DestroyStringInfo(layer_info[i].info);
        continue;
      }
    /*
      Allocate layered image.
    */
    layer_info[i].image=CloneImage(image,layer_info[i].page.width,
      layer_info[i].page.height,MagickFalse,exception);
    if (layer_info[i].image == (Image *) NULL)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " allocation of image for layer %.20g failed",(double) i);
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      }
    for (j=0; j < (ssize_t) layer_info[i].channels; j++)
    {
      if (layer_info[i].channel_info[j].channel == AlphaPixelChannel)
        {
          layer_info[i].image->alpha_trait=BlendPixelTrait;
          break;
        }
    }
    if (layer_info[i].info != (StringInfo *) NULL)
      {
        (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
          layer_info[i].info,exception);
        layer_info[i].info=DestroyStringInfo(layer_info[i].info);
      }
  }
  /* When pinging, attach the (pixel-less) layers and stop. */
  if (image_info->ping != MagickFalse)
    {
      AttachPSDLayers(image,layer_info,number_layers);
      return(MagickTrue);
    }
  /*
    Third pass: decode the channel data of each kept layer; skipped
    layers have their channel payloads discarded to stay in sync.
  */
  status=MagickTrue;
  index=0;
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].image == (Image *) NULL) ||
        (PSDSkipImage(psd_info, image_info,++index) != MagickFalse))
      {
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          if (DiscardBlobBytes(image,(MagickSizeType)
              layer_info[i].channel_info[j].size) == MagickFalse)
            {
              layer_info=DestroyLayerInfo(layer_info,number_layers);
              ThrowBinaryException(CorruptImageError,
                "UnexpectedEndOfFile",image->filename);
            }
        }
        continue;
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for layer %.20g",(double) i);
    status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
      exception);
    if (status == MagickFalse)
      break;
    status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
      (MagickSizeType) number_layers);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    AttachPSDLayers(image,layer_info,number_layers);
  else
    layer_info=DestroyLayerInfo(layer_info,number_layers);
  return(status);
}
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  /*
    Public entry point for reading PSD layers.  When the security policy
    denies PSD coder reads, nothing is read and MagickTrue is returned.
  */
  if (IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
    exception));
}
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  /*
    Read the merged (flattened) image stored after the layer section.
    Only Raw and RLE compression are supported here; channels are stored
    planar, one after another.  Skipped entirely when the caller asked
    for a scene other than 0.
  */
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    i;

  if ((image_info->number_scenes != 0) && (image_info->scene != 0))
    return(MagickTrue);
  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /* RLE stores one packed-row size per row per channel up front. */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  SetPsdMetaChannels(image,psd_info,psd_info->channels,exception);
  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    PixelChannel
      channel;

    status=GetPixelChannelFromPsdIndex(psd_info,i,&channel);
    if (status == MagickFalse)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          CorruptImageError,"MaximumChannelsExceeded","'%.20g'",(double) i);
        break;
      }
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,channel,sizes+(i*image->rows),exception);
    else
      status=ReadPSDChannelRaw(image,channel,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateCMYK(image,exception);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
skip_layers;
MagickOffsetType
offset;
MagickSizeType
length;
MagickBooleanType
status;
PSDInfo
psd_info;
ssize_t
i;
size_t
image_list_length;
ssize_t
count;
StringInfo
*profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read image header.
*/
image->endian=MSBEndian;
count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
psd_info.version=ReadBlobMSBShort(image);
if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
((psd_info.version != 1) && (psd_info.version != 2)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
(void) ReadBlob(image,6,psd_info.reserved);
psd_info.channels=ReadBlobMSBShort(image);
if (psd_info.channels < 1)
ThrowReaderException(CorruptImageError,"MissingImageChannel");
if (psd_info.channels > MaxPSDChannels)
ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
psd_info.rows=ReadBlobMSBLong(image);
psd_info.columns=ReadBlobMSBLong(image);
if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
(psd_info.columns > 30000)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.depth=ReadBlobMSBShort(image);
if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
(psd_info.depth != 16) && (psd_info.depth != 32))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.mode=ReadBlobMSBShort(image);
if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
(double) psd_info.columns,(double) psd_info.rows,(double)
psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
psd_info.mode));
if (EOFBlob(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Initialize image.
*/
image->depth=psd_info.depth;
image->columns=psd_info.columns;
image->rows=psd_info.rows;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
status=ResetImagePixels(image,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
psd_info.min_channels=3;
switch (psd_info.mode)
{
case LabMode:
{
(void) SetImageColorspace(image,LabColorspace,exception);
break;
}
case CMYKMode:
{
psd_info.min_channels=4;
(void) SetImageColorspace(image,CMYKColorspace,exception);
break;
}
case BitmapMode:
case GrayscaleMode:
case DuotoneMode:
{
if (psd_info.depth != 32)
{
status=AcquireImageColormap(image,MagickMin((size_t)
(psd_info.depth < 16 ? 256 : 65536), MaxColormapSize),exception);
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image colormap allocated");
}
psd_info.min_channels=1;
(void) SetImageColorspace(image,GRAYColorspace,exception);
break;
}
case IndexedMode:
{
psd_info.min_channels=1;
break;
}
case MultichannelMode:
{
if ((psd_info.channels > 0) && (psd_info.channels < 3))
{
psd_info.min_channels=psd_info.channels;
(void) SetImageColorspace(image,GRAYColorspace,exception);
}
break;
}
}
if (psd_info.channels < psd_info.min_channels)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Read PSD raster colormap only present for indexed and duotone images.
*/
length=ReadBlobMSBLong(image);
if ((psd_info.mode == IndexedMode) && (length < 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (length != 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading colormap");
if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
{
/*
Duotone image data; the format of this data is undocumented.
32 bits per pixel; the colormap is ignored.
*/
(void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
}
else
{
size_t
number_colors;
/*
Read PSD raster colormap.
*/
number_colors=(size_t) length/3;
if (number_colors > 65536)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
image->alpha_trait=UndefinedPixelTrait;
}
}
if ((image->depth == 1) && (image->storage_class != PseudoClass))
ThrowReaderException(CorruptImageError, "ImproperImageHeader");
psd_info.has_merged_image=MagickTrue;
profile=(StringInfo *) NULL;
length=ReadBlobMSBLong(image);
if (length != 0)
{
unsigned char
*blocks;
/*
Image resources block.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading image resource blocks - %.20g bytes",(double)
((MagickOffsetType) length));
if (length > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
sizeof(*blocks));
if (blocks == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,(size_t) length,blocks);
if ((count != (ssize_t) length) || (length < 4) ||
(LocaleNCompare((char *) blocks,"8BIM",4) != 0))
{
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length);
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
}
/*
Layer and mask block.
*/
length=GetPSDSize(&psd_info,image);
if (length == 8)
{
length=ReadBlobMSBLong(image);
length=ReadBlobMSBLong(image);
}
offset=TellBlob(image);
skip_layers=MagickFalse;
if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
(psd_info.has_merged_image != MagickFalse))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" read composite only");
skip_layers=MagickTrue;
}
if (length == 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image has no layers");
}
else
{
if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
exception) != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Skip the rest of the layer and mask information.
*/
(void) SeekBlob(image,offset+length,SEEK_SET);
}
/*
If we are only "pinging" the image, then we're done - so return.
*/
if (EOFBlob(image) != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
}
if (image_info->ping != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
Read the precombined layer, present for PSD < 4 compatibility.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading the precombined layer");
image_list_length=GetImageListLength(image);
if ((psd_info.has_merged_image != MagickFalse) || (image_list_length == 1))
psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage(
image_info,image,&psd_info,exception);
if ((psd_info.has_merged_image == MagickFalse) && (image_list_length == 1) &&
(length != 0))
{
(void) SeekBlob(image,offset,SEEK_SET);
status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
exception);
if (status != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
image_list_length=GetImageListLength(image);
}
if (psd_info.has_merged_image == MagickFalse)
{
Image
*merged;
if (image_list_length == 1)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
}
image->background_color.alpha=(MagickRealType) TransparentAlpha;
image->background_color.alpha_trait=BlendPixelTrait;
(void) SetImageBackgroundColor(image,exception);
merged=MergeImageLayers(image,FlattenLayer,exception);
if (merged == (Image *) NULL)
{
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
ReplaceImageInList(&image,merged);
}
if (profile != (StringInfo *) NULL)
{
const char
*option;
Image
*next;
MagickBooleanType
replicate_profile;
option=GetImageOption(image_info,"psd:replicate-profile");
replicate_profile=IsStringTrue(option);
i=0;
next=image;
while (next != (Image *) NULL)
{
if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse)
{
(void) SetImageProfile(next,GetStringInfoName(profile),profile,
exception);
if (replicate_profile == MagickFalse)
break;
}
next=next->next;
}
profile=DestroyStringInfo(profile);
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
  MagickInfo
    *entry;

  /*
    Register the large-document variant (PSB) of the format.
  */
  entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->flags|=CoderDecoderSeekableStreamFlag|CoderEncoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  /*
    Register the classic Photoshop (PSD) format.
  */
  entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->flags|=CoderDecoderSeekableStreamFlag|CoderEncoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
  /* Remove the two registrations installed by RegisterPSDImage(). */
  (void) UnregisterMagickInfo("PSB");
  (void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  /*
    Write a row-length slot: 16-bit for version 1 (PSD), 32-bit for
    version 2 (PSB).
  */
  if (psd_info->version != 1)
    return(WriteBlobMSBLong(image,(unsigned int) offset));
  return(WriteBlobMSBShort(image,(unsigned short) offset));
}
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  /*
    Seek back to a previously reserved offset slot, overwrite it with the
    actual size, and restore the current blob position.
  */
  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=(psd_info->version == 1) ?
    WriteBlobMSBShort(image,(unsigned short) size) :
    WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  /*
    Write a section-length field: 32-bit for version 1 (PSD), 64-bit for
    version 2 (PSB).
  */
  if (psd_info->version != 1)
    return(WriteBlobLongLong(image,size));
  return(WriteBlobLong(image,(unsigned int) size));
}
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  /*
    Patch a previously reserved size field at 'offset', then restore the
    current blob position.
  */
  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=SetPSDSize(psd_info,image,size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int
    count;

  ssize_t
    i,
    j;

  unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.  'pixels' holds 'length' raw
    bytes; the encoded stream is written to 'compact_pixels' (the caller
    must supply a worst-case sized buffer) and the number of encoded bytes
    is returned.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  /* Scratch buffer for one literal run: 1 count byte + up to 127 literals. */
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  /* i counts the input bytes still to encode; the switch dispatches on the
     small tail cases (1-3 bytes left) that cannot start a full scan. */
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* One byte left: emit it as a literal run of length 1. */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* Two bytes left: emit both as a literal run. */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /* Repeat code (256-3)+1 = 254: replicate next byte 3 times. */
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        /* Otherwise emit the three bytes as literals. */
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)  /* max run a single repeat code can hold */
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run: copy bytes until three identical bytes in a row are
          seen (which would start a packed run), the input nearly runs out,
          or the 127-literal capacity of 'packbits' is reached.
        */
        count=0;
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);  /* code n => n+1 literals */
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128; /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const CompressionType compression,
  const ssize_t channels)
{
  size_t
    count;

  ssize_t
    channel,
    row;

  /*
    Emit the 2-byte compression marker and, for RLE, reserve one
    row-length slot per channel row; the slots are patched later with the
    actual encoded row sizes.  Returns the number of bytes written.
  */
  count=0;
  switch (compression)
  {
    case RLECompression:
    {
      count=(size_t) WriteBlobShort(image,RLE);
      for (channel=0; channel < channels; channel++)
        for (row=0; row < (ssize_t) next_image->rows; row++)
          count+=SetPSDOffset(psd_info,image,0);
      break;
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    case ZipCompression:
    {
      count=(size_t) WriteBlobShort(image,ZipWithoutPrediction);
      break;
    }
#endif
    default:
    {
      /* Any other compression request is stored as raw data. */
      count=(size_t) WriteBlobShort(image,Raw);
      break;
    }
  }
  return(count);
}
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  const CompressionType compression,ExceptionInfo *exception)
{
  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  const Quantum
    *p;

  ssize_t
    i;

  size_t
    count,
    length;

  ssize_t
    y;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE
  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  /*
    Write one channel of 'next_image' (selected by quantum_type) to
    'image' using the given compression.  Returns the number of bytes
    written, or 0 on allocation/initialization failure.
  */
  count=0;
  if (separate != MagickFalse)
    {
      /* Layer-style channel: it carries its own compression marker and,
         for RLE, its own block of per-row length slots right after it. */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
    }
  if (next_image->depth > 8)
    next_image->depth=16;
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(
        MagickMinBufferExtent,sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      memset(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      /* image_info->quality in 1..9 selects the zlib level directly. */
      level=Z_DEFAULT_COMPRESSION;
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          compressed_pixels=(unsigned char *) RelinquishMagickMemory(
            compressed_pixels);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,exception);
    /* 1-bit rows are bit-inverted before being written. */
    if (monochrome != MagickFalse)
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
          exception);
        count+=WriteBlob(image,length,compact_pixels);
        /* Patch this row's reserved slot and advance to the next one. */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (compression == ZipCompression)
      {
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;  /* last row: drain the deflate stream */
        do {
          stream.avail_out=(uInt) MagickMinBufferExtent;
          stream.next_out=(Bytef *) compressed_pixels;
          if (deflate(&stream,flush) == Z_STREAM_ERROR)
            break;
          length=(size_t) MagickMinBufferExtent-stream.avail_out;
          if (length > 0)
            count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}
static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  size_t
    bytes_per_sample;

  unsigned char
    *compact_pixels;

  /*
    Allocate a worst-case PackBits output buffer for one image row;
    returns NULL (with an exception raised) on allocation failure.
  */
  bytes_per_sample=(image->depth > 8UL) ? 2UL : 1UL;
  compact_pixels=(unsigned char *) AcquireQuantumMemory((9*image->columns)+1,
    bytes_per_sample*sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),ResourceLimitError,
      "MemoryAllocationFailed","`%s'",image->filename);
  return(compact_pixels);
}
static size_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  CompressionType
    compression;

  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    count,
    length,
    offset_length;

  unsigned char
    *compact_pixels;

  /*
    Write every channel of 'next_image'.  With separate!=MagickFalse
    (layer data) each channel gets its own compression marker and its
    length is patched into the channel table at 'size_offset'; otherwise
    (merged image) one compression header precedes all channels and the
    reserved RLE row slots are walked via 'rows_offset'.  Returns the
    total bytes written, or 0 on failure.
  */
  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  compression=next_image->compression;
  if (image_info->compression != UndefinedCompression)
    compression=image_info->compression;
  if (compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image,exception);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      if ((next_image->storage_class != PseudoClass) ||
          (IsImageGray(next_image) != MagickFalse))
        {
          if (IsImageGray(next_image) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
              3);
          if (next_image->alpha_trait != UndefinedPixelTrait)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,
        (ssize_t) channels);
      /* Byte span of one channel's worth of reserved RLE row slots. */
      offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
    }
  size_offset+=2;
  if ((next_image->storage_class == PseudoClass) &&
      (IsImageGray(next_image) == MagickFalse))
    {
      /* Indexed image: a single index channel. */
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate,compression,
        exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsImageGray(next_image) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* CMYK data is stored inverted; negate before and after writing. */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateCMYK(next_image,exception);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate,compression,
                exception);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->alpha_trait != UndefinedPixelTrait)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateCMYK(next_image,exception);
  if (separate != MagickFalse)
    {
      const char
        *property;

      /* The layer's opacity mask (registered elsewhere under this key) is
         written as one extra channel. */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            exception);
          if (mask != (Image *) NULL)
            {
              if (compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask,exception);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue,compression,
                exception);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  size_t
    count,
    length,
    remainder;

  ssize_t
    i;

  /*
    Write a Pascal string: one length byte followed by at most 255
    characters, then zero bytes so the total (length byte included) is a
    multiple of 'padding'.  Returns the number of bytes written.
  */
  count=0;
  length=strlen(value);
  if (length > 255UL)
    length=255UL;
  count+=WriteBlobByte(image,(unsigned char) length);
  if (length != 0)
    count+=WriteBlob(image,length,(const unsigned char *) value);
  remainder=(length+1) % padding;
  if (remainder != 0)
    for (i=0; i < (ssize_t) (padding-remainder); i++)
      count+=WriteBlobByte(image,0);
  return(count);
}
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  /*
    Write the 0x03ED (resolution info) image resource block.  Resolutions
    are stored as 16.16 fixed-point values; the 0.5 term below rounds to
    the nearest fixed-point unit.
  */
  if (image->units == PixelsPerCentimeterResolution)
    {
      x_resolution=2.54*65536.0*image->resolution.x+0.5;
      y_resolution=2.54*65536.0*image->resolution.y+0.5;
      units=2;  /* resolution expressed per centimeter */
    }
  else
    {
      x_resolution=65536.0*image->resolution.x+0.5;
      y_resolution=65536.0*image->resolution.y+0.5;
      units=1;  /* resolution expressed per inch */
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16); /* resource size */
  /*
    The rounding term was already added when the fixed-point values were
    computed above; adding 0.5 again at the cast would round twice and
    bias the stored resolution upward.
  */
  (void) WriteBlobMSBLong(image,(unsigned int) x_resolution);
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) y_resolution);
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    count;

  /*
    Write one channel-table entry: the 16-bit channel id followed by a
    zero length placeholder that is patched after the channel data has
    been written.
  */
  count=(size_t) WriteBlobShort(image,(unsigned short) channel);
  count+=SetPSDSize(psd_info,image,0);
  return(count);
}
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  /*
    Strip the ICC profile resource (id 0x040F) from an 8BIM resource-block
    profile by compacting the remaining resources over it in place and
    shrinking the profile length.
  */
  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    unsigned char
      *q;

    q=(unsigned char *) p;  /* start of this resource's 12-byte header */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    /* Header: 4-byte "8BIM" signature, 2-byte id, 2-byte (empty) name,
       4-byte data length. */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* Bytes occupied by this resource: even-padded data + header. */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            if ((q+quantum < (datum+length-16)))
              (void) memmove(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)  /* resource data is padded to even length */
      p++;
  }
}
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  /*
    Strip the resolution resource (id 0x03ED) from an 8BIM resource-block
    profile so a freshly generated resolution block can be written instead.
  */
  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;  /* start of this resource's 12-byte header */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    /* Header: 4-byte "8BIM" signature, 2-byte id, 2-byte (empty) name,
       4-byte data length. */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    cnt=PSDQuantum(count);  /* data length padded to even size */
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0)
      {
        /* Compact the following resources over this one and shrink. */
        (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)  /* resource data is padded to even length */
      p++;
  }
}
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  /*
    Return the "psd:additional-info" profile filtered according to the
    psd:additional-info image option: "all" keeps everything, anything
    other than "selective" discards the profile, and "selective" keeps
    only whitelisted 4-character keys by compacting the profile in place.
  */
  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;
  /* Each record: 4-byte signature, 4-byte key, 4-byte big-endian size,
     then 'size' bytes of data. */
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);  /* truncated/corrupt record */
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;
      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* Not whitelisted: compact the rest of the data over this record
           (memmove returns the destination, i.e. the record's start). */
        if (remaining_length > 0)
          p=(unsigned char *) memmove(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;  /* keep this record */
    p+=size;
  }
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  /* Truncate to the kept records and re-attach a copy to the image. */
  SetStringInfoLength(profile,(const size_t) length);
  (void) SetImageProfile(image,"psd:additional-info",info,exception);
  return(profile);
}
static MagickBooleanType WritePSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size,
  ExceptionInfo *exception)
{
  char
    layer_name[MagickPathExtent];

  const char
    *property;

  const StringInfo
    *info;

  Image
    *base_image,
    *next_image;

  MagickBooleanType
    status;

  MagickOffsetType
    *layer_size_offsets,
    size_offset;

  ssize_t
    i;

  size_t
    layer_count,
    layer_index,
    length,
    name_length,
    rounded_size,
    size;

  /*
    Write the PSD layer-info section: layer count, one record per layer
    (bounds, channel table, blend mode, opacity, flags, optional mask,
    name, additional info), then each layer's channel data.  The section
    size field is reserved first and patched at the end.  If layers_size
    is non-NULL it receives the unrounded section size.
  */
  status=MagickTrue;
  base_image=GetNextImageInList(image);
  if (base_image == (Image *) NULL)
    base_image=image;
  size=0;
  size_offset=TellBlob(image);
  (void) SetPSDSize(psd_info,image,0);  /* reserve the section size field */
  layer_count=0;
  for (next_image=base_image; next_image != NULL; )
  {
    layer_count++;
    next_image=GetNextImageInList(next_image);
  }
  /* A negative layer count flags that the first alpha channel holds the
     merged image's transparency. */
  if (image->alpha_trait != UndefinedPixelTrait)
    size+=WriteBlobShort(image,-(unsigned short) layer_count);
  else
    size+=WriteBlobShort(image,(unsigned short) layer_count);
  layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
    (size_t) layer_count,sizeof(MagickOffsetType));
  if (layer_size_offsets == (MagickOffsetType *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  layer_index=0;
  for (next_image=base_image; next_image != NULL; )
  {
    Image
      *mask;

    unsigned char
      default_color;

    unsigned short
      channels,
      total_channels;

    mask=(Image *) NULL;
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    default_color=0;
    if (property != (const char *) NULL)
      {
        mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
        /* NOTE(review): a 9-character registry key appears to select a
           white (255) mask default -- confirm against where the artifact
           is set. */
        default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
      }
    /* Layer rectangle: top, left, bottom, right. */
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.y);
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.x);
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+
      next_image->rows));
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+
      next_image->columns));
    channels=1;
    if ((next_image->storage_class != PseudoClass) &&
        (IsImageGray(next_image) == MagickFalse))
      channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 :
        3);
    total_channels=channels;
    if (next_image->alpha_trait != UndefinedPixelTrait)
      total_channels++;
    if (mask != (Image *) NULL)
      total_channels++;
    size+=WriteBlobShort(image,total_channels);
    /* Remember where this layer's channel table starts so the channel
       lengths can be patched once the data has been written. */
    layer_size_offsets[layer_index++]=TellBlob(image);
    for (i=0; i < (ssize_t) channels; i++)
      size+=WriteChannelSize(psd_info,image,(signed short) i);
    if (next_image->alpha_trait != UndefinedPixelTrait)
      size+=WriteChannelSize(psd_info,image,-1);  /* -1: alpha channel */
    if (mask != (Image *) NULL)
      size+=WriteChannelSize(psd_info,image,-2);  /* -2: layer mask */
    /* Blend-mode signature, byte-swapped on little-endian blobs. */
    size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM");
    size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image));
    property=GetImageArtifact(next_image,"psd:layer.opacity");
    if (property != (const char *) NULL)
      {
        Quantum
          opacity;

        opacity=(Quantum) StringToInteger(property);
        size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
        (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
      }
    else
      size+=WriteBlobByte(image,255);  /* fully opaque by default */
    size+=WriteBlobByte(image,0);  /* clipping: base */
    size+=WriteBlobByte(image,(const unsigned char)
      (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */
    size+=WriteBlobByte(image,0);  /* filler byte */
    info=GetAdditionalInformation(image_info,next_image,exception);
    property=(const char *) GetImageProperty(next_image,"label",exception);
    if (property == (const char *) NULL)
      {
        /* Unlabeled layers get a generated name, e.g. "L1". */
        (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
          (double) layer_index);
        property=layer_name;
      }
    /* Extra-data length: padded Pascal name + optional additional info +
       mask record (20) + the two fixed length fields (8). */
    name_length=strlen(property)+1;
    if ((name_length % 4) != 0)
      name_length+=(4-(name_length % 4));
    if (info != (const StringInfo *) NULL)
      name_length+=GetStringInfoLength(info);
    name_length+=8;
    if (mask != (Image *) NULL)
      name_length+=20;
    size+=WriteBlobLong(image,(unsigned int) name_length);
    if (mask == (Image *) NULL)
      size+=WriteBlobLong(image,0);  /* no mask record */
    else
      {
        if (mask->compose != NoCompositeOp)
          (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
            default_color),MagickTrue,exception);
        /* Mask coordinates are stored relative to the canvas. */
        mask->page.y+=image->page.y;
        mask->page.x+=image->page.x;
        size+=WriteBlobLong(image,20);  /* mask record size */
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.y);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.x);
        size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+
          mask->page.y));
        size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+
          mask->page.x));
        size+=WriteBlobByte(image,default_color);
        size+=WriteBlobByte(image,(const unsigned char)
          (mask->compose == NoCompositeOp ? 2 : 0));
        size+=WriteBlobMSBShort(image,0);  /* padding */
      }
    size+=WriteBlobLong(image,0);  /* layer blending ranges length */
    size+=WritePascalString(image,property,4);
    if (info != (const StringInfo *) NULL)
      size+=WriteBlob(image,GetStringInfoLength(info),
        GetStringInfoDatum(info));
    next_image=GetNextImageInList(next_image);
  }
  /*
    Now the image data!
  */
  next_image=base_image;
  layer_index=0;
  while (next_image != NULL)
  {
    length=WritePSDChannels(psd_info,image_info,image,next_image,
      layer_size_offsets[layer_index++],MagickTrue,exception);
    if (length == 0)
      {
        status=MagickFalse;
        break;
      }
    size+=length;
    next_image=GetNextImageInList(next_image);
  }
  /*
    Write the total size
  */
  if (layers_size != (size_t*) NULL)
    *layers_size=size;
  /* Round the recorded section size up to an even byte count.
     NOTE(review): no pad byte is emitted here -- presumably the caller
     accounts for the rounding; verify against WritePSDImage. */
  if ((size/2) != ((size+1)/2))
    rounded_size=size+1;
  else
    rounded_size=size;
  (void) WritePSDSize(psd_info,image,rounded_size,size_offset);
  layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
    layer_size_offsets);
  /*
    Remove the opacity mask from the registry
  */
  next_image=base_image;
  while (next_image != (Image *) NULL)
  {
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    if (property != (const char *) NULL)
      (void) DeleteImageRegistry(property);
    next_image=GetNextImageInList(next_image);
  }
  return(status);
}
ModuleExport MagickBooleanType WritePSDLayers(Image * image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  /*
    Public entry point for writing PSD layer records.  When the security
    policy does not authorize writing PSD, the call is a quiet no-op that
    reports success.
  */
  if (IsRightsAuthorized(CoderPolicyDomain,WritePolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(WritePSDLayersInternal(image,image_info,psd_info,(size_t *) NULL,
    exception));
}
/*
  WritePSDImage() writes `image` to its blob in Adobe Photoshop format
  (PSD, or the large-document PSB variant).  Sections are emitted in
  order: file header, optional indexed colormap, image-resource section
  (resolution, optional 8BIM and ICC profiles), the layer/mask section,
  and finally the composite image data.  Returns MagickTrue on success.
*/
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
const StringInfo
*icc_profile;
MagickBooleanType
status;
PSDInfo
psd_info;
ssize_t
i;
size_t
length,
num_channels;
StringInfo
*bim_profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
/*
Version 2 selects the PSB (large document) layout; chosen either
explicitly via magick "PSB" or when a dimension exceeds 30000 pixels.
*/
psd_info.version=1;
if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
(image->columns > 30000) || (image->rows > 30000))
psd_info.version=2;
/* File signature, version, and six reserved zero bytes. */
(void) WriteBlob(image,4,(const unsigned char *) "8BPS");
(void) WriteBlobMSBShort(image,psd_info.version); /* version */
for (i=1; i <= 6; i++)
(void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
/*
Channel count: 1-2 for grayscale/pseudocolor (plus alpha), 3-4 for
RGB (plus alpha), 4-5 for CMYK (plus alpha).  A gray image carrying
an ICC profile is deliberately not written as grayscale.
*/
if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
(SetImageGray(image,exception) != MagickFalse))
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
else
if ((image_info->type != TrueColorType) &&
(image_info->type != TrueColorAlphaType) &&
(image->storage_class == PseudoClass))
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
else
{
if (image->storage_class == PseudoClass)
(void) SetImageStorageClass(image,DirectClass,exception);
if (image->colorspace != CMYKColorspace)
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
else
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
}
(void) WriteBlobMSBShort(image,(unsigned short) num_channels);
(void) WriteBlobMSBLong(image,(unsigned int) image->rows);
(void) WriteBlobMSBLong(image,(unsigned int) image->columns);
if (IsImageGray(image) != MagickFalse)
{
MagickBooleanType
monochrome;
/*
Write depth & mode.
*/
monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
MagickTrue : MagickFalse;
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
}
else
{
/* Non-gray: palette images are 8-bit; otherwise 8 or 16 by depth. */
(void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
if (((image_info->colorspace != UndefinedColorspace) ||
(image->colorspace != CMYKColorspace)) &&
(image_info->colorspace != CMYKColorspace))
{
(void) TransformImageColorspace(image,sRGBColorspace,exception);
(void) WriteBlobMSBShort(image,(unsigned short)
(image->storage_class == PseudoClass ? IndexedMode : RGBMode));
}
else
{
if (image->colorspace != CMYKColorspace)
(void) TransformImageColorspace(image,CMYKColorspace,exception);
(void) WriteBlobMSBShort(image,CMYKMode);
}
}
if ((IsImageGray(image) != MagickFalse) ||
(image->storage_class == DirectClass) || (image->colors > 256))
(void) WriteBlobMSBLong(image,0);
else
{
/*
Write PSD raster colormap: 768 bytes = 3 planes (R, G, B) of 256
entries each; unused entries are zero-padded.
*/
(void) WriteBlobMSBLong(image,768);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].red)));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].green)));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].blue)));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
}
/*
Image resource block.
*/
/* 28 bytes accounts for the resolution resource emitted by
   WriteResolutionResourceBlock() below; profile sizes (rounded to an
   even length via PSDQuantum) are added on top. */
length=28; /* 0x03EB */
bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
icc_profile=GetImageProfile(image,"icc");
if (bim_profile != (StringInfo *) NULL)
{
/* Work on a clone: the embedded ICC and resolution resources are
   stripped because they are written separately. */
bim_profile=CloneStringInfo(bim_profile);
if (icc_profile != (StringInfo *) NULL)
RemoveICCProfileFromResourceBlock(bim_profile);
RemoveResolutionFromResourceBlock(bim_profile);
length+=PSDQuantum(GetStringInfoLength(bim_profile));
}
if (icc_profile != (const StringInfo *) NULL)
length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
(void) WriteBlobMSBLong(image,(unsigned int) length);
WriteResolutionResourceBlock(image);
if (bim_profile != (StringInfo *) NULL)
{
(void) WriteBlob(image,GetStringInfoLength(bim_profile),
GetStringInfoDatum(bim_profile));
bim_profile=DestroyStringInfo(bim_profile);
}
if (icc_profile != (StringInfo *) NULL)
{
/* ICC profile resource: "8BIM", id 0x040F, empty name, data padded
   to even length.  NOTE(review): 0x0000040F is wider than the short
   actually written -- only the low 16 bits land in the file; the
   literal's width looks accidental but is harmless. */
(void) WriteBlob(image,4,(const unsigned char *) "8BIM");
(void) WriteBlobMSBShort(image,0x0000040F);
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
icc_profile));
(void) WriteBlob(image,GetStringInfoLength(icc_profile),
GetStringInfoDatum(icc_profile));
if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile)))
(void) WriteBlobByte(image,0);
}
if (status != MagickFalse)
{
const char
*option;
CompressionType
compression;
MagickOffsetType
size_offset;
size_t
size;
/* Reserve a placeholder for the layer-section size; it is patched
   once the real size is known. */
size_offset=TellBlob(image);
(void) SetPSDSize(&psd_info,image,0);
option=GetImageOption(image_info,"psd:write-layers");
if (IsStringFalse(option) != MagickTrue)
{
status=WritePSDLayersInternal(image,image_info,&psd_info,&size,
exception);
/* Patch the reserved size field; the extra 8 (PSD) or 12 (PSB)
   bytes cover the section bookkeeping around the layer data. */
(void) WritePSDSize(&psd_info,image,size+
(psd_info.version == 1 ? 8 : 12),size_offset);
(void) WriteBlobMSBLong(image,0); /* user mask data */
}
/*
Write composite image.
*/
/* Zip is not supported for the composite; demote it to RLE.  The
   image's compression setting is restored afterwards. */
compression=image->compression;
if (image_info->compression != UndefinedCompression)
image->compression=image_info->compression;
if (image->compression == ZipCompression)
image->compression=RLECompression;
if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
exception) == 0)
status=MagickFalse;
image->compression=compression;
}
(void) CloseBlob(image);
return(status);
}
|
zraster.c | #include "../include/GL/gl.h"
#include "../include/zbuffer.h"
#include "msghandling.h"
#include "zgl.h"
/*
  Transform a raster-position vertex: compute clip coordinates (pc) via
  the combined model-projection matrix, eye coordinates (ec) via the
  top of the model-view stack, and derive the clip code.  The incoming
  W component is assumed to be 1 (no normal is needed for raster ops).
*/
static void gl_vertex_transform_raster(GLVertex* v) {
	GLContext* c = gl_get_context();
	const GLfloat x = v->coord.X;
	const GLfloat y = v->coord.Y;
	const GLfloat z = v->coord.Z;
	GLfloat* m;

	/* Clip-space position from the combined model-projection matrix. */
	m = &c->matrix_model_projection.m[0][0];
	v->pc.X = x * m[0] + y * m[1] + z * m[2] + m[3];
	v->pc.Y = x * m[4] + y * m[5] + z * m[6] + m[7];
	v->pc.Z = x * m[8] + y * m[9] + z * m[10] + m[11];
	if (c->matrix_model_projection_no_w_transform)
		v->pc.W = m[15];
	else
		v->pc.W = x * m[12] + y * m[13] + z * m[14] + m[15];

	/* Eye coordinates from the current model-view matrix. */
	m = &c->matrix_stack_ptr[0]->m[0][0];
	v->ec.X = x * m[0] + y * m[1] + z * m[2] + m[3];
	v->ec.Y = x * m[4] + y * m[5] + z * m[6] + m[7];
	v->ec.Z = x * m[8] + y * m[9] + z * m[10] + m[11];
	v->ec.W = x * m[12] + y * m[13] + z * m[14] + m[15];

	v->clip_code = gl_clipcode(v->pc.X, v->pc.Y, v->pc.Z, v->pc.W);
}
/* Queue an OP_RasterPos command for deferred execution by
   glopRasterPos. */
void glRasterPos4f(GLfloat x, GLfloat y, GLfloat z, GLfloat w) {
	GLParam cmd[5];
	cmd[0].op = OP_RasterPos;
	cmd[1].f = x;
	cmd[2].f = y;
	cmd[3].f = z;
	cmd[4].f = w;
	gl_add_op(cmd);
}
/*
  Execute a queued OP_RasterPos: transform the requested position and,
  if it lies inside the view volume, latch its window coordinates and
  fixed-point depth into the context; otherwise mark the raster
  position invalid.
*/
void glopRasterPos(GLParam* p) {
	GLContext* c = gl_get_context();
	GLVertex v;
	GLfloat winv;

	v.coord.X = p[1].f;
	v.coord.Y = p[2].f;
	v.coord.Z = p[3].f;
	v.coord.W = p[4].f;
	gl_vertex_transform_raster(&v);

	/* Clipped away: the raster position becomes invalid. */
	if (v.clip_code != 0) {
		c->rasterposvalid = 0;
		return;
	}

	/* Perspective divide followed by the viewport mapping. */
	winv = 1.0 / v.pc.W;
	v.zp.x = (GLint)(v.pc.X * winv * c->viewport.scale.X + c->viewport.trans.X);
	v.zp.y = (GLint)(v.pc.Y * winv * c->viewport.scale.Y + c->viewport.trans.Y);
	v.zp.z = (GLint)(v.pc.Z * winv * c->viewport.scale.Z + c->viewport.trans.Z);

	c->rasterpos.v[0] = v.zp.x;
	c->rasterpos.v[1] = v.zp.y;
	c->rastervertex = v;
	/* c->rasterpos.v[2] = v.zp.z; */
	c->rasterpos_zz = v.zp.z >> ZB_POINT_Z_FRAC_BITS;
	c->rasterposvalid = 1;
}
/* Convenience forms of glRasterPos4f: a missing z defaults to 0 and a
   missing w to 1; the *v variants unpack an array argument. */
void glRasterPos2f(GLfloat x, GLfloat y) {
	glRasterPos4f(x, y, 0, 1);
}
void glRasterPos3f(GLfloat x, GLfloat y, GLfloat z) {
	glRasterPos4f(x, y, z, 1);
}
void glRasterPos2fv(GLfloat* v) {
	glRasterPos2f(v[0], v[1]);
}
void glRasterPos3fv(GLfloat* v) {
	glRasterPos3f(v[0], v[1], v[2]);
}
void glRasterPos4fv(GLfloat* v) {
	glRasterPos4f(v[0], v[1], v[2], v[3]);
}
/*
  Validate and queue an OP_DrawPixels command.  Only GL_RGB data in the
  framebuffer's native pixel type (selected by TGL_FEATURE_RENDER_BITS)
  is accepted; anything else emits a warning and is dropped.  Only the
  pointer `data` is stored (see TODO), so the caller's buffer must stay
  alive until the op is executed.
*/
void glDrawPixels(GLsizei width, GLsizei height, GLenum format, GLenum type, void* data) {
/* TODO: Come up with a clever scheme for storing the data to avoid pointer dependency. */
#if TGL_FEATURE_RENDER_BITS == 32
if (type != GL_UNSIGNED_INT && type != GL_UNSIGNED_INT_8_8_8_8) {
tgl_warning("\nERROR: Incorrect type for glDrawPixels. It MUST be GL_UNSIGNED_INT or GL_UNSIGNED_INT_8_8_8_8, A R G B!");
return;
}
#elif TGL_FEATURE_RENDER_BITS == 16
if (type != GL_UNSIGNED_SHORT && type != GL_UNSIGNED_SHORT_5_6_5) {
tgl_warning("\nERROR: Incorrect type for glDrawPixels. it MUST be GL_UNSIGNED_SHORT or GL_UNSIGNED_SHORT_5_6_5, R5 G6 B5!");
return;
}
#else
#error "Bad TGL_FEATURE_RENDER_BITS"
#endif
if (format != GL_RGB) {
tgl_warning("\nERROR: Incorrect format for glDrawPixels.");
return;
}
/* NOTE(review): only slots 0-3 are filled; presumably the op table
   entry for OP_DrawPixels declares 4 parameters -- confirm the two
   extra slots are intentional. */
GLParam p[6];
p[0].op = OP_DrawPixels;
p[1].i = width;
p[2].i = height;
p[3].p = data;
gl_add_op(p);
}
/* Depth test: passes when depth testing is disabled (zbdt == 0) or the
   incoming z is at least the stored depth value.  Relies on a variable
   named `zbdt` being in scope at every use site.  All macro arguments
   are now parenthesized so operator-bearing arguments (e.g. `a + b`)
   bind correctly. */
#define ZCMP(z, zpix) (!(zbdt) || (z) >= (zpix))
/* True when (_x, _y) lies inside the _w x _h framebuffer rectangle. */
#define CLIPTEST(_x, _y, _w, _h) ((0 <= (_x)) && ((_w) > (_x)) && (0 <= (_y)) && ((_h) > (_y)))
/*
  Execute a queued OP_DrawPixels command (p[1] = width, p[2] = height,
  p[3] = source pixel pointer).  Each source pixel is replicated over a
  pzoomx-by-pzoomy screen rectangle anchored at the current raster
  position; rows are emitted bottom-up.  Writes honor the depth test at
  the raster position's fixed depth and, when enabled at compile time,
  blending.  Does nothing when the raster position is invalid.
*/
void glopDrawPixels(GLParam* p) {
GLContext* c = gl_get_context();
GLint sy, sx, ty, tx;
GLint w = p[1].i;
GLint h = p[2].i;
V4 rastpos = c->rasterpos;
ZBuffer* zb = c->zb;
PIXEL* d = p[3].p;
PIXEL* pbuf = zb->pbuf;
GLushort* zbuf = zb->zbuf;
GLubyte zbdw = zb->depth_write;
GLubyte zbdt = zb->depth_test;
GLint tw = zb->xsize;
GLint th = zb->ysize;
GLfloat pzoomx = c->pzoomx;
GLfloat pzoomy = c->pzoomy;
GLint zz = c->rasterpos_zz;
#if TGL_FEATURE_BLEND_DRAW_PIXELS == 1
TGL_BLEND_VARS
#endif
#if TGL_FEATURE_BLEND == 1
#if TGL_FEATURE_BLEND_DRAW_PIXELS == 1
GLuint zbeb = zb->enable_blend;
#endif
#endif
if (!c->rasterposvalid)return;
#if TGL_FEATURE_ALT_RENDERMODES == 1
/* Selection/feedback render modes record the op instead of drawing. */
if (c->render_mode == GL_SELECT) {
gl_add_select(zz, zz);
return;
} else if (c->render_mode == GL_FEEDBACK) {
gl_add_feedback(GL_DRAW_PIXEL_TOKEN, &(c->rastervertex), NULL, NULL, 0);
return;
}
#endif
#if TGL_FEATURE_MULTITHREADED_DRAWPIXELS == 1
#ifdef _OPENMP
/* FIX: sx/ty/tx are declared at function scope and would otherwise be
   shared between threads -- a data race.  They must be private per
   thread; sy, as the associated loop's index, is implicitly private. */
#pragma omp parallel for private(sx, ty, tx)
#endif
for (sy = 0; sy < h; sy++)
for (sx = 0; sx < w; sx++) {
PIXEL col = d[sy * w + sx];
V4 rastoffset;
/* Zoomed destination rectangle for this source pixel: v[0]/v[1] is
   one corner, v[2]/v[3] the exclusive opposite corner (y runs
   downward from the raster position). */
rastoffset.v[0] = rastpos.v[0] + (GLfloat)sx * pzoomx;
rastoffset.v[1] = rastpos.v[1] - ((GLfloat)(h - sy) * pzoomy);
rastoffset.v[2] = rastoffset.v[0] + pzoomx;
rastoffset.v[3] = rastoffset.v[1] - pzoomy;
for (ty = rastoffset.v[1]; (GLfloat)ty > rastoffset.v[3]; ty--)
for (tx = rastoffset.v[0]; (GLfloat)tx < rastoffset.v[2]; tx++)
if (CLIPTEST(tx, ty, tw, th)) {
GLushort* pz = zbuf + (ty * tw + tx);
if (ZCMP(zz, *pz)) {
#if TGL_FEATURE_BLEND == 1
#if TGL_FEATURE_BLEND_DRAW_PIXELS == 1
if (!zbeb)
pbuf[tx + ty * tw] = col;
else
TGL_BLEND_FUNC(col, pbuf[tx + ty * tw])
#else
pbuf[tx + ty * tw] = col;
#endif
#else
pbuf[tx + ty * tw] = col;
#endif
if (zbdw)
*pz = zz;
}
}
}
#else
/* Single-threaded fallback: identical body to the loop above. */
for (sy = 0; sy < h; sy++)
for (sx = 0; sx < w; sx++) {
PIXEL col = d[sy * w + sx];
V4 rastoffset;
rastoffset.v[0] = rastpos.v[0] + (GLfloat)sx * pzoomx;
rastoffset.v[1] = rastpos.v[1] - ((GLfloat)(h - sy) * pzoomy);
rastoffset.v[2] = rastoffset.v[0] + pzoomx;
rastoffset.v[3] = rastoffset.v[1] - pzoomy;
for (ty = rastoffset.v[1]; (GLfloat)ty > rastoffset.v[3]; ty--)
for (tx = rastoffset.v[0]; (GLfloat)tx < rastoffset.v[2]; tx++)
if (CLIPTEST(tx, ty, tw, th)) {
GLushort* pz = zbuf + (ty * tw + tx);
if (ZCMP(zz, *pz)) {
#if TGL_FEATURE_BLEND == 1
#if TGL_FEATURE_BLEND_DRAW_PIXELS == 1
if (!zbeb)
pbuf[tx + ty * tw] = col;
else
TGL_BLEND_FUNC(col, pbuf[tx + ty * tw])
#else
pbuf[tx + ty * tw] = col;
#endif
#else
pbuf[tx + ty * tw] = col;
#endif
if (zbdw)
*pz = zz;
}
}
}
#endif
}
/* Queue a pixel-zoom change; the factors take effect when the command
   stream is executed (glopPixelZoom) and are used by glopDrawPixels. */
void glPixelZoom(GLfloat x, GLfloat y) {
	GLParam cmd[3];
	cmd[0].op = OP_PixelZoom;
	cmd[1].f = x;
	cmd[2].f = y;
	gl_add_op(cmd);
}
/* Execute a queued OP_PixelZoom: latch the x/y zoom factors into the
   context. */
void glopPixelZoom(GLParam* p) {
	GLContext* ctx = gl_get_context();
	ctx->pzoomx = p[1].f;
	ctx->pzoomy = p[2].f;
}
|
ast-dump-openmp-target-teams-distribute-parallel-for-simd.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) { /* single loop, no collapse clause; NOTE: the CHECK lines below pin exact line/column numbers -- do not add or remove lines in this function */
#pragma omp target teams distribute parallel for simd
for (int i = 0; i < x; i++)
;
}
void test_two(int x, int y) { /* two nested loops, no collapse clause; CHECK lines pin line/column numbers -- do not add or remove lines */
#pragma omp target teams distribute parallel for simd
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_three(int x, int y) { /* explicit collapse(1) on two nested loops; CHECK lines pin line/column numbers -- do not add or remove lines */
#pragma omp target teams distribute parallel for simd collapse(1)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_four(int x, int y) { /* collapse(2) over exactly two nested loops; CHECK lines pin line/column numbers -- do not add or remove lines */
#pragma omp target teams distribute parallel for simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_five(int x, int y, int z) { /* collapse(2) with a third, non-collapsed inner loop; CHECK lines pin line/column numbers -- do not add or remove lines */
#pragma omp target teams distribute parallel for simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
for (int i = 0; i < z; i++)
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:4:9, col:54>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:10:9, col:54>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:17:9, col:66>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:55, col:65>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:64> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:64> 'int' 1
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:24:9, col:66>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:55, col:65>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:64> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:64> 'int' 2
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:31:9, col:66>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:55, col:65>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:64> 'int'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:64> 'int' 2
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
OpenMP_ArrayRankSort.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Rank sort: for each a[i], count how many elements precede it in sorted
 * order; that count is a[i]'s final position in b.  Ties are broken by
 * original index so equal values get distinct ranks.
 *
 * FIX: x and j were shared by default inside the parallel-for region — a
 * data race that corrupts the computed ranks.  They must be thread-private
 * (i, as the loop variable, is already private by the OpenMP rules). */
int main(){
int x, n = 10;
int a[n], b[n];
int i, j;
srand(1234);
for(i = 0 ; i < n ; i++)
a[i] = rand()%1000;
#pragma omp parallel for private(x, j)
for (i = 0; i < n; i++) { /* for each number */
x = 0;
for (j = 0; j < n; j++) /* count number less than it */
if ((a[i] > a[j]) || ((a[i] == a[j]) && (j < i))) /* parenthesized: && binds tighter than || */
x++;
b[x] = a[i]; /* copy number into correct place */
}
for (i = 0; i < n ; i++)
printf("A[%d]:%d\n", i, a[i]);
for (i = 0; i < n ; i++)
printf("B[%d]:%d\n", i, b[i]);
return 0;
} |
array_transpose.h | // Copyright (C) 2019. Huawei Technologies Co., Ltd. All rights reserved.
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#ifndef _H_ARRAY_TRANSPOSE
#define _H_ARRAY_TRANSPOSE
#include "secure_c_wrapper.h"
#include "affinity_policy.h"
// Generic N-D transpose kernel shared by array_transpose().
//   branch == 0: element-wise copy (T is the element type).
//   branch != 0: each output "element" is a contiguous tile of tileSize
//                bytes, copied with UNI_MEMCPY.
// The innermost sizeInnerIndex dims are already collapsed by the caller,
// so only dims [sizeInnerIndex, outputDimsNum) are permuted here.
// NOTE(review): assumes inputDims/outputDims/transposeDims each hold at
// least inputDimsNum/outputDimsNum entries — confirm with callers.
template <int branch, typename T>
static inline void inner_transpose_template(unsigned int tileSize,
unsigned int *inputDims,
const T *input,
unsigned int *outputDims,
T *output,
unsigned int *transposeDims,
int inputDimsNum,
int outputDimsNum,
unsigned int outputSize,
int sizeInnerIndex)
{
#ifdef _USE_OPENMP
#pragma omp parallel num_threads(OMP_NUM_THREADS)
#endif
{
// Declared inside the parallel region: one coordinate scratch buffer
// per thread, so iterations do not race on it.
std::vector<unsigned int> inputLocalIndex(inputDimsNum);
#ifdef _USE_OPENMP
#pragma omp for
#endif
for (unsigned int i = 0; i < outputSize; i++) {
// Decompose the flat output index i into per-dimension coordinates
// and scatter each one to its permuted position in the input
// coordinate vector (dims are indexed innermost-first).
unsigned int outputIndex = i;
for (int j = sizeInnerIndex; j < outputDimsNum; j++) {
unsigned int value = outputIndex % outputDims[j];
outputIndex /= outputDims[j];
inputLocalIndex[inputDimsNum - 1 - transposeDims[outputDimsNum - 1 - j]] = value;
}
// Re-linearize the input coordinates into a flat input index,
// mirroring the decomposition above.
unsigned int inputIndex = 0;
for (int j = inputDimsNum - 1; j > sizeInnerIndex; j--) {
inputIndex = (inputIndex + inputLocalIndex[j]) * inputDims[j - 1];
}
inputIndex += inputLocalIndex[sizeInnerIndex];
if (branch == 0) {
*(output + i) = *(input + inputIndex);
} else {
UNI_MEMCPY(output + i * tileSize, input + inputIndex * tileSize, tileSize);
}
}
}
}
// Transpose an N-D array. transposeDims is the permutation (outermost dim
// first); inputDims/outputDims store extents innermost-first.
// Fast paths:
//   1. A trailing run of un-permuted dimensions is collapsed into one
//      contiguous "inner tile" (sizeInner elements, tileSize bytes).
//   2. Permutations (0,2,1) and (0,2,3,1) over the remaining dims — which
//      look like NCX->NXC / NCHW->NHWC layout swaps, judging by the
//      in/ic/ihiw names — use a blocked copy loop.
// Otherwise dispatches to the generic inner_transpose_template kernel.
// FIX: removed the unused locals inputPtr/outputPtr (dead code).
inline void array_transpose(unsigned int elementSize,
unsigned int *inputDims,
const void *input,
unsigned int *outputDims,
void *output,
unsigned int *transposeDims,
int inputDimsNum,
int outputDimsNum)
{
// Collapse the trailing dims that keep their position; they move as one
// contiguous block of sizeInner elements.
unsigned int sizeInner = 1;
int sizeInnerIndex = 0;
for (int i = outputDimsNum - 1; i >= 0; i--) {
if ((int)transposeDims[i] == i) {
sizeInner *= inputDims[inputDimsNum - 1 - i];
sizeInnerIndex++;
} else {
break;
}
}
int tileSize = elementSize * sizeInner;
int in = inputDims[inputDimsNum - 1], ihiw = 0, ic = 0;
// Detect the (0,2,1) / (0,2,3,1) permutations over the non-collapsed dims.
if (outputDimsNum - sizeInnerIndex == 3 && transposeDims[0] == 0 && transposeDims[1] == 2 &&
transposeDims[2] == 1) {
ic = inputDims[inputDimsNum - 2];
ihiw = inputDims[inputDimsNum - 3];
}
if (outputDimsNum - sizeInnerIndex == 4 && transposeDims[0] == 0 && transposeDims[1] == 2 &&
transposeDims[2] == 3 && transposeDims[3] == 1) {
ic = inputDims[inputDimsNum - 2];
ihiw = inputDims[inputDimsNum - 3] * inputDims[inputDimsNum - 4];
}
// Blocked fast path; requires distinct buffers since it reads and writes
// in different orders.
if (ic > 0 && ihiw > 0 && input != output) {
#ifdef _USE_OPENMP
#pragma omp parallel for num_threads(OMP_NUM_THREADS)
#endif
for (int o = 0; o < in * ihiw; o++) {
int n = o / ihiw;
int hw = o % ihiw;
U8 *dst = (U8 *)output + o * ic * tileSize;
for (int c = 0; c < ic; c++, dst += tileSize) {
const U8 *src = (const U8 *)input + ((n * ic + c) * ihiw + hw) * tileSize;
UNI_MEMCPY(dst, src, tileSize);
}
}
return;
}
// Generic path: sanity-check element counts, then dispatch on tile size.
unsigned int inputSize = 1, outputSize = 1;
for (int i = 0; i < inputDimsNum; i++) {
inputSize *= inputDims[i];
}
for (int i = 0; i < outputDimsNum; i++) {
outputSize *= outputDims[i];
}
CHECK_REQUIREMENT(inputSize == outputSize);
outputSize = outputSize / sizeInner;
if (sizeInner == 1 && elementSize == 4) {
inner_transpose_template<0, int>(elementSize, inputDims, (const int *)input, outputDims,
(int *)output, transposeDims, inputDimsNum, outputDimsNum, outputSize, sizeInnerIndex);
} else if (sizeInner == 1 && elementSize == 2) {
inner_transpose_template<0, short>(elementSize, inputDims, (const short *)input, outputDims,
(short *)output, transposeDims, inputDimsNum, outputDimsNum, outputSize, sizeInnerIndex);
} else {
inner_transpose_template<1, char>(tileSize, inputDims, (const char *)input, outputDims,
(char *)output, transposeDims, inputDimsNum, outputDimsNum, outputSize, sizeInnerIndex);
}
}
// Reference N-D transpose: element-by-element copy, no fast paths.
// dimsNum applies to both input and output; transposeDims is the
// permutation (outermost-first), dims stored innermost-first.
inline void array_transpose_naive(unsigned int elementSize,
unsigned int *inputDims,
const void *input,
unsigned int *outputDims,
void *output,
unsigned int *transposeDims,
int dimsNum)
{
// Nothing to permute for scalars or 1-D arrays.
if (dimsNum <= 1) {
return;
}
// NOTE(review): inputSize is computed but only outputSize is used below.
unsigned int inputSize = 1, outputSize = 1;
for (int i = 0; i < dimsNum; i++) {
inputSize *= inputDims[i];
outputSize *= outputDims[i];
}
const char *inputPtr = (const char *)input;
char *outputPtr = (char *)output;
#ifdef _USE_OPENMP
#pragma omp parallel num_threads(OMP_NUM_THREADS)
#endif
{
// Declared inside the parallel region: one coordinate scratch buffer
// per thread.
std::vector<unsigned int> inputLocalIndex(dimsNum);
#ifdef _USE_OPENMP
#pragma omp for
#endif
for (unsigned int i = 0; i < outputSize; i++) {
// Decompose the flat output index into per-dim coordinates and
// scatter them into input coordinate order via transposeDims.
unsigned int outputIndex = i;
for (int j = 0; j < dimsNum; j++) {
unsigned int value = outputIndex % outputDims[j];
outputIndex /= outputDims[j];
inputLocalIndex[dimsNum - 1 - transposeDims[dimsNum - 1 - j]] = value;
}
// Re-linearize the coordinates into the flat input index.
unsigned int inputIndex = 0;
for (int j = dimsNum - 1; j > 0; j--) {
inputIndex = (inputIndex + inputLocalIndex[j]) * inputDims[j - 1];
}
inputIndex += inputLocalIndex[0];
// Copy one element (elementSize bytes) to its transposed slot.
UNI_MEMCPY(
outputPtr + i * elementSize, inputPtr + inputIndex * elementSize, elementSize);
}
}
}
#endif
|
GB_unaryop__identity_uint16_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint16_int16
// op(A') function: GB_tran__identity_uint16_int16
// C type: uint16_t
// A type: int16_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unary op over anz entries:
//   Cx [p] = (uint16_t) Ax [p]   (via the GB_CAST_OP macro above).
// Auto-generated file: comments only; see GxB_NO_* controls in GB_DISABLE.
GrB_Info GB_unop__identity_uint16_int16
(
uint16_t *Cx, // Cx and Ax may be aliased
int16_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// Cx [p] = (uint16_t) Ax [p]
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose-and-apply: C = (uint16_t) A' for the identity op.  The actual
// loop body lives in the textually-included GB_unaryop_transpose.c,
// specialized by the GB_* macros defined above in this file.
GrB_Info GB_tran__identity_uint16_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__ceil_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ceil_fp32_fp32)
// op(A') function: GB (_unop_tran__ceil_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = ceilf (aij)
// type of the A matrix entries
#define GB_ATYPE \
float
// type of the C matrix entries
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// access the (p)-th entry of C
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ceilf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = ceilf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CEIL || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__ceil_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = ceilf (Ax [p]) for every entry present in A
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: slots 0..anz-1 all hold entries
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        int64_t k ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            Cx [k] = ceilf (Ax [k]) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b:
        // apply the operator only where the bitmap marks an entry
        int64_t k ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                Cx [k] = ceilf (Ax [k]) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__ceil_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces, // per-workspace scratch arrays
const int64_t *restrict A_slice, // partition of A across tasks
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
// the transpose work is in the included template, specialized by the
// GB_* macros defined above
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
for-9.c | /* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-ompexp" } */
extern void bar(int);
/* Compile-only test: an ordered, guided-schedule worksharing loop must be
   expanded by the ompexp pass into GOMP_loop_ordered_guided_start/next
   calls (checked by the dg-final scans below).  */
void foo (int n)
{
int i;
#pragma omp for schedule(guided) ordered
for (i = 0; i < n; ++i)
bar(i);
}
/* { dg-final { scan-tree-dump-times "GOMP_loop_ordered_guided_start" 1 "ompexp" } } */
/* { dg-final { scan-tree-dump-times "GOMP_loop_ordered_guided_next" 1 "ompexp" } } */
|
overlapping.h | #include "CSC.h"
#include "align.h"
#include "common.h"
#include "../kmercode/hash_funcs.h"
#include "../kmercode/Kmer.hpp"
#include "../kmercode/Buffer.h"
#include "../kmercode/common.h"
#include "../kmercode/fq_reader.h"
#include "../kmercode/ParallelFASTQ.h"
#include "../libcuckoo/cuckoohash_map.hh"
#ifndef __NVCC__
#include "../xavier/xavier.h"
#endif
#include <seqan/sequence.h>
#include <seqan/align.h>
#include <seqan/score.h>
#include <seqan/modifier.h>
#include <seqan/seeds.h>
#include <omp.h>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <cstdlib>
#include <algorithm>
#include <ctype.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <stdint.h>
#include <set>
using namespace seqan;
#ifdef __NVCC__
#include "../loganGPU/logan.cuh"
#endif
typedef Seed<Simple> TSeed;
typedef SeedSet<TSeed> TSeedSet;
#define PERCORECACHE (1024 * 1024)
#define TIMESTEP
#ifndef PRINT
#define PRINT
#endif
//#define THREADLIMIT
//#define MAX_NUM_THREAD 1
//#define OSX
//#define LINUX
//#define RAM
#ifndef __SIMD__
#define __SIMD__
#endif
#ifdef OSX
#include <mach/mach.h>
#include <mach/vm_statistics.h>
#include <mach/mach_types.h>
#include <mach/mach_init.h>
#include <mach/mach_host.h>
#endif
#ifdef LINUX
#include "sys/types.h"
#include "sys/sysinfo.h"
struct sysinfo info;
#endif
double safety_net = 1.5;
/*
Multithreaded prefix sum
Inputs:
in: an input array
size: the length of the input array "in"
nthreads: number of threads used to compute the prefix sum
Output:
return an array of size "size+1"
the memory of the output array is allocated internally
Example:
in = [2, 1, 3, 5]
out = [0, 2, 3, 6, 11]
*/
template <typename T>
T* prefixsum(T* in, int size, int nthreads)
{
std::vector<T> tsum(nthreads+1);
tsum[0] = 0;
T* out = new T[size+1];
out[0] = 0;
T* psum = &out[1];
#pragma omp parallel
{
int ithread = omp_get_thread_num();
T sum = 0;
#pragma omp for schedule(static)
for (int i=0; i<size; i++)
{
sum += in[i];
psum[i] = sum;
}
tsum[ithread+1] = sum;
#pragma omp barrier
T offset = 0;
for(int i=0; i<(ithread+1); i++)
{
offset += tsum[i];
}
#pragma omp for schedule(static)
for (int i=0; i<size; i++)
{
psum[i] += offset;
}
}
return out;
}
/* fix according to PAF format */
/* Mirror the interval [begpH, endpH) on a read of length lenH, mapping
 * reverse-strand coordinates back to forward-strand coordinates as the
 * PAF format requires. Updates begpH and endpH in place. */
void toOriginalCoordinates(int& begpH, int& endpH, const int lenH)
{
    const int oldBegin = begpH;
    begpH = lenH - endpH;
    endpH = lenH - oldBegin;
}
// estimate the number of floating point operations of SpGEMM
// Estimate the number of multiply-add operations SpGEMM C = A*B would
// perform, one count per column of C.  Returns a new array of length
// B.cols (caller frees with delete []), or NULL if either input is empty.
// If lowtriout is true, only operations contributing to the strictly lower
// triangular part of C (row > column) are counted.
// Fix: removed the unused per-column local "myThread" and its
// omp_get_thread_num() call.
template <typename IT, typename NT>
IT* estimateFLOP(const CSC<IT,NT> & A, const CSC<IT,NT> & B, bool lowtriout)
{
    if(A.isEmpty() || B.isEmpty())
    {
        return NULL;
    }
    IT* colflopC = new IT[B.cols]; // flop count for every column of C
    #pragma omp parallel for
    for(IT i=0; i< B.cols; ++i)
    {
        colflopC[i] = 0;
    }
    #pragma omp parallel for
    for(IT i=0; i < B.cols; ++i)
    {
        for (IT j = B.colptr[i]; j < B.colptr[i+1]; ++j) // all nonzeros in that column of B
        {
            IT col2fetch = B.rowids[j]; // row index of that nonzero in B == column to fetch in A
            IT nnzcolA = 0;
            if(lowtriout)
            {
                for(IT k = A.colptr[col2fetch]; k < A.colptr[col2fetch+1]; ++k) // all nonzeros in this column of A
                {
                    // i is the column_id of the output and A.rowids[k] is the row_id of the output
                    if(i < A.rowids[k])
                    {
                        ++nnzcolA;
                    }
                }
            }
            else
            {
                nnzcolA = A.colptr[col2fetch+1]- A.colptr[col2fetch]; // nonzero count of that column of A
            }
            colflopC[i] += nnzcolA;
        }
    }
    return colflopC;
}
// estimate space for result of SpGEMM with Hash
// Estimate the exact number of nonzeros in every column of C = A*B by a
// hash-based symbolic multiplication.  flopC must be the per-column flop
// estimate produced by estimateFLOP for the same A, B, and lowtriout.
// Returns a new array of length B.cols (caller frees with delete []), or
// NULL if either input is empty.  If lowtriout is true, only entries of
// the strictly lower triangular part of C are counted.
// Fix: removed the unused per-column local "myThread" and its
// omp_get_thread_num() call.
template <typename IT, typename NT>
IT* estimateNNZ_Hash(const CSC<IT,NT>& A, const CSC<IT,NT>& B, const IT* flopC, bool lowtriout)
{
    if(A.isEmpty() || B.isEmpty())
    {
        return NULL;
    }
    IT* colnnzC = new IT[B.cols]; // nnz in every column of C
    #pragma omp parallel for
    for(IT i=0; i< B.cols; ++i)
    {
        colnnzC[i] = 0;
    }
    #pragma omp parallel for
    for(IT i=0; i < B.cols; ++i) // for each column of B
    {
        const unsigned int minHashTableSize = 16;
        const unsigned int hashScale = 107;
        // table size = smallest power of two >= the column's flop count, so
        // open addressing with linear probing always finds a free slot
        size_t ht_size = minHashTableSize;
        while(ht_size < flopC[i])
        {
            ht_size <<= 1;
        }
        std::vector<IT> globalHashVec(ht_size);
        for(size_t j=0; j < ht_size; ++j)
        {
            globalHashVec[j] = -1; // empty-slot marker
        }
        for (IT j = B.colptr[i]; j < B.colptr[i+1]; ++j) // all nonzeros in that column of B
        {
            IT col2fetch = B.rowids[j]; // row index of that nonzero in B == column to fetch in A
            for(IT k = A.colptr[col2fetch]; k < A.colptr[col2fetch+1]; ++k) // all nonzeros in this column of A
            {
                IT key = A.rowids[k];
                if(lowtriout && i >= key) // i is the column_id of the output and key is the row_id of the output
                    continue;
                IT hash = (key*hashScale) & (ht_size-1);
                while (1) // hash probing
                {
                    if (globalHashVec[hash] == key) // key already counted
                    {
                        break;
                    }
                    else if (globalHashVec[hash] == -1) // new row index: count it
                    {
                        globalHashVec[hash] = key;
                        colnnzC[i] ++;
                        break;
                    }
                    else // collision: linear probe
                    {
                        hash = (hash+1) & (ht_size-1);
                    }
                }
            }
        }
    }
    return colnnzC;
}
//! Hash based column-by-column spgemm algorithm. Based on earlier code by Buluc, Azad, and Nagasaka
//! If lowtriout= true, then only creates the lower triangular part: no diagonal and no upper triangular
//! input matrices do not need to have sorted rowids within each column
// Numeric phase of the hash SpGEMM for one block of columns [start, end) of
// C = A*B.  colptrC must hold the exact per-column nonzero counts (from
// estimateNNZ_Hash), so each column's hash table is guaranteed large enough.
// Results are written per column into RowIdsofC/ValuesofC (indexed i-start).
template <typename IT, typename NT, typename MultiplyOperation, typename AddOperation, typename FT>
void LocalSpGEMM(IT & start, IT & end, const CSC<IT,NT> & A, const CSC<IT,NT> & B, MultiplyOperation multop, AddOperation addop,
vector<IT> * RowIdsofC, vector<FT> * ValuesofC, IT* colptrC, bool lowtriout)
{
#pragma omp parallel for
for(IT i = start; i < end; ++i) // for bcols of B (one block)
{
const IT minHashTableSize = 16;
const IT hashScale = 107;
size_t nnzcolC = colptrC[i+1] - colptrC[i]; //nnz in the current column of C (=Output)
// table size = smallest power of two >= nnzcolC
IT ht_size = minHashTableSize;
while(ht_size < nnzcolC) //ht_size is set as 2^n
{
ht_size <<= 1;
}
std::vector< std::pair<IT,FT>> globalHashVec(ht_size);
// Initialize hash tables (first == -1 marks an empty slot)
for(IT j=0; j < ht_size; ++j)
{
globalHashVec[j].first = -1;
}
for (IT j = B.colptr[i]; j < B.colptr[i+1]; ++j) // all nonzeros in that column of B
{
IT col2fetch = B.rowids[j]; // find the row index of that nonzero in B, which is the column to fetch in A
NT valueofB = B.values[j];
for(IT k = A.colptr[col2fetch]; k < A.colptr[col2fetch+1]; ++k) // all nonzeros in this column of A
{
IT key = A.rowids[k];
// i is the column_id of the output and key is the row_id of the output
if(lowtriout && i >= key)
continue;
// GG: modified to get the read ids needed to compute the overlap length
FT result = multop(A.values[k], valueofB, key, i);
IT hash = (key*hashScale) & (ht_size-1);
while (1) //hash probing
{
if (globalHashVec[hash].first == key) //key is found in hash table
{ // GG: addop temporarily modified; remove key, i after testing
globalHashVec[hash].second = addop(result, globalHashVec[hash].second, key, i);
break;
}
else if (globalHashVec[hash].first == -1) //key is not registered yet
{
globalHashVec[hash].first = key;
globalHashVec[hash].second = result;
break;
}
else //key is not found
{
hash = (hash+1) & (ht_size-1); // don't exit the while loop yet
}
}
}
}
// gather non-zero elements from hash table (and then sort them by row indices if needed);
// compaction in place is safe because index never overtakes j
IT index = 0;
for (IT j=0; j < ht_size; ++j)
{
if (globalHashVec[j].first != -1)
{
globalHashVec[index++] = globalHashVec[j];
}
}
#ifdef SORTCOLS
std::sort(globalHashVec.begin(), globalHashVec.begin() + index, sort_less<IT, NT>);
#endif
RowIdsofC[i-start].resize(index);
ValuesofC[i-start].resize(index);
for (IT j=0; j< index; ++j)
{
RowIdsofC[i-start][j] = globalHashVec[j].first;
ValuesofC[i-start][j] = globalHashVec[j].second;
}
}
}
// Return the memory budget (in bytes) available to BELLA.  If the user
// supplied a budget, honor it; otherwise query the OS for free memory,
// falling back to BELLA's default budget when no query is possible.
// Fixes: "vm_unsigned int" was not a valid type (host_page_size takes a
// vm_size_t*); 64-bit statistics require the HOST_VM_INFO64 flavor; and
// free_memory is now initialized so a failed Mach query cannot leave it
// uninitialized.
double estimateMemory(const BELLApars & b_pars)
{
    double free_memory = 0.0;
    if (b_pars.userDefMem)
    {
        free_memory = b_pars.totalMemory * 1024 * 1024;
    }
    else
    {
#if defined (OSX) // OSX-based memory consumption implementation
        vm_size_t page_size;
        mach_port_t mach_port;
        mach_msg_type_number_t count;
        vm_statistics64_data_t vm_stats;
        mach_port = mach_host_self();
        count = sizeof(vm_stats) / sizeof(natural_t);
        if (KERN_SUCCESS == host_page_size(mach_port, &page_size) &&
            KERN_SUCCESS == host_statistics64(mach_port, HOST_VM_INFO64,
                                (host_info64_t)&vm_stats, &count))
        {
            free_memory = (double) vm_stats.free_count * (double)page_size;
        }
        else
        {
            // query failed: use the default budget rather than garbage
            free_memory = b_pars.totalMemory * 1024 * 1024;
        }
#elif defined (LINUX) // LINUX-based memory consumption implementation
        if(sysinfo(&info) != 0)
        {
            return 0; // sysinfo failed (original returned "false", i.e. 0.0)
        }
        free_memory = info.freeram * info.mem_unit;
        free_memory += info.freeswap * info.mem_unit;
        free_memory += info.bufferram * info.mem_unit;
#else
        free_memory = b_pars.totalMemory * 1024 * 1024; // memory is neither user-supplied nor can be estimated, so use BELLA's default
#endif
    }
    return free_memory;
}
#ifndef __NVCC__
// ======================================= //
// CPU Functions //
// ======================================= //
// Decide whether an aligned read pair passes the adaptive (Chernoff-based)
// or fixed score threshold; if it does, append one BELLA- or PAF-format
// line to myBatch and update the outputted/aligned-base counters.
// "passed" is set true on success; base counters are updated either way.
#ifdef __SIMD__
void PostAlignDecision(const xavierResult& maxExtScore,
#else
void PostAlignDecision(const seqAnResult& maxExtScore,
#endif
const readType_& read1, const readType_& read2,
const BELLApars& b_pars, double ratiophi, int count, stringstream& myBatch, size_t& outputted,
size_t& numBasesAlignedTrue, size_t& numBasesAlignedFalse, bool& passed, int const& matches)
{
auto maxseed = maxExtScore.seed; // returns a seqan:Seed object
// {begin/end}Position{V/H}: Returns the begin/end position of the seed in the query (vertical/horizontal direction)
// these four return seqan:Tposition objects
#ifdef __SIMD__
int begpV = getBeginPositionV(maxseed);
int endpV = getEndPositionV(maxseed);
int begpH = getBeginPositionH(maxseed);
int endpH = getEndPositionH(maxseed);
#else
int begpV = beginPositionV(maxseed);
int endpV = endPositionV(maxseed);
int begpH = beginPositionH(maxseed);
int endpH = endPositionH(maxseed);
#endif
// Get references for better naming
const string& seq1 = read1.seq; // H
const string& seq2 = read2.seq; // Vzw
unsigned short int read1len = seq1.length();
unsigned short int read2len = seq2.length();
// estimated overlap = unaligned overhang on each side plus mean aligned span
unsigned short int overlapLenV = endpV - begpV;
unsigned short int overlapLenH = endpH - begpH;
unsigned short int minLeft = min(begpV, begpH);
unsigned short int minRight = min(read2len - endpV, read1len - endpH);
unsigned short int ov = minLeft + minRight + (overlapLenV + overlapLenH) / 2;
// NOTE(review): normLen and minLen are computed but unused in this function
unsigned short int normLen = max(overlapLenV, overlapLenH);
unsigned short int minLen = min(overlapLenV, overlapLenH);
if(b_pars.fixedThreshold == -1)
{
// adaptive threshold: fraction of the expected score for an overlap of length ov
float mythreshold = (1 - b_pars.deltaChernoff) * (ratiophi * (float)ov);
if((float)maxExtScore.score >= mythreshold)
{
passed = true;
}
}
else if(maxExtScore.score >= b_pars.fixedThreshold) // GG: this is only useful for debugging
{
passed = true;
}
if(passed)
{
if(!b_pars.outputPaf) // BELLA output format
{
myBatch << read2.nametag << '\t' << read1.nametag << '\t' << count << '\t' << maxExtScore.score << '\t' << ov << '\t' << maxExtScore.strand << '\t' <<
begpV << '\t' << endpV << '\t' << read2len << '\t' << begpH << '\t' << endpH << '\t' << read1len << endl;
}
else
{
std::string pafstrand; // maxExtScore not modifiable
unsigned short int mapq = 255; // mapping quality (0-255; 255 for missing)
if(maxExtScore.strand == "n") pafstrand = "+";
else pafstrand = "-";
if(pafstrand == "-")
toOriginalCoordinates(begpH, endpH, read1len);
// PAF format is the output format used by minimap/minimap2: https://github.com/lh3/miniasm/blob/master/PAF.md
myBatch << read2.nametag << '\t' << read2len << '\t' << begpV << '\t' << endpV << '\t' << pafstrand << '\t' <<
read1.nametag << '\t' << read1len << '\t' << begpH << '\t' << endpH << '\t' << maxExtScore.score << '\t' << ov << '\t' << mapq << endl;
}
++outputted;
numBasesAlignedTrue += (endpV-begpV);
}
else
{
numBasesAlignedFalse += (endpV-begpV);
}
}
// Align every candidate read pair in columns [start, end) of the overlap
// matrix (nonzeros rowids/values, offset by "offset"), buffer the per-thread
// output text, then write all buffers to "filename" in parallel at
// precomputed byte offsets.  Returns the tuple (alignedpairs, alignedbases,
// totalreadlen, totaloutputt, totsuccbases, totfailbases, timeoutputt).
template <typename IT, typename FT>
auto RunPairWiseAlignments(IT start, IT end, IT offset, IT * colptrC, IT * rowids, FT * values, const readVector_& reads,
char* filename, const BELLApars& b_pars, const double& ratiophi)
{
size_t alignedpairs = 0;
size_t alignedbases = 0;
size_t totalreadlen = 0;
size_t totaloutputt = 0;
size_t totsuccbases = 0;
size_t totfailbases = 0;
int numThreads = 1;
#pragma omp parallel
{
numThreads = omp_get_num_threads();
}
vector<stringstream> vss(numThreads); // any chance of false sharing here? depends on how stringstream is implemented. optimize later if needed...
#pragma omp parallel for schedule(dynamic)
for(IT j = start; j < end; ++j) // for (end-start) columns of A^T A (one block)
{
size_t numAlignmentsThread = 0;
size_t numBasesAlignedThread = 0;
size_t readLengthsThread = 0;
size_t numBasesAlignedTrue = 0;
size_t numBasesAlignedFalse = 0;
size_t outputted = 0;
int ithread = omp_get_thread_num();
for (IT i = colptrC[j]; i < colptrC[j+1]; ++i) // all nonzeros in that column of A^T A
{
unsigned int rid = rowids[i-offset]; // row id
unsigned int cid = j; // column id
const string& seq1 = reads[rid].seq; // get reference for readability
const string& seq2 = reads[cid].seq; // get reference for readability
unsigned short int seq1len = seq1.length();
unsigned short int seq2len = seq2.length();
spmatPtr_ val = values[i-offset];
if(!b_pars.skipAlignment) // fix -z to not print
{
numAlignmentsThread++;
readLengthsThread = readLengthsThread + seq1len + seq2len;
#ifdef __SIMD__
xavierResult maxExtScore;
#else
seqAnResult maxExtScore;
#endif
bool passed = false;
// GG: number of matching kmer into the majority voted bin
unsigned short int matches = val->chain();
unsigned short int overlap;
pair<int, int> kmer = val->choose();
// NOTE(review): these i and j shadow the outer loop indices; the
// shadowing is scoped to this branch, but renaming would be safer
int i = kmer.first, j = kmer.second;
// GG: nucleotide alignment, seeded at the chosen kmer position
#ifdef __SIMD__
maxExtScore = xavierAlign(seq1, seq2, seq1len, i, j, b_pars.xDrop, b_pars.kmerSize);
#else
maxExtScore = alignSeqAn(seq1, seq2, seq1len, i, j, b_pars.xDrop, b_pars.kmerSize);
#endif
PostAlignDecision(maxExtScore, reads[rid], reads[cid], b_pars, ratiophi, val->count, vss[ithread],
outputted, numBasesAlignedTrue, numBasesAlignedFalse, passed, matches);
#ifdef __SIMD__
numBasesAlignedThread += getEndPositionV(maxExtScore.seed)-getBeginPositionV(maxExtScore.seed);
#else
numBasesAlignedThread += endPositionV(maxExtScore.seed)-beginPositionV(maxExtScore.seed);
#endif
}
else // if skipAlignment == false do alignment, else save just some info on the pair to file
{
pair<int, int> kmer = val->choose();
int i = kmer.first, j = kmer.second;
int overlap = overlapop(reads[rid].seq, reads[cid].seq, i, j, b_pars.kmerSize);
vss[ithread] << reads[cid].nametag << '\t' << reads[rid].nametag << '\t' << val->count << '\t' <<
overlap << '\t' << seq2len << '\t' << seq1len << endl;
++outputted;
// vss[ithread] << reads[cid].nametag << '\t' << reads[rid].nametag << '\t' << val->count << '\t' <<
// seq2len << '\t' << seq1len << std::endl;
// ++outputted;
}
} // all nonzeros in that column of A^T A
// fold this thread's counters into the global totals
#pragma omp critical
{
alignedpairs += numAlignmentsThread;
alignedbases += numBasesAlignedThread;
totalreadlen += readLengthsThread;
totaloutputt += outputted;
totsuccbases += numBasesAlignedTrue;
totfailbases += numBasesAlignedFalse;
}
} // all columns from start...end (omp for loop)
double outputting = omp_get_wtime();
// measure each per-thread buffer size so every thread can later write at
// its own precomputed offset without synchronization
int64_t* bytes = new int64_t[numThreads];
for(int i = 0; i < numThreads; ++i)
{
vss[i].seekg(0, ios::end);
bytes[i] = vss[i].tellg();
vss[i].seekg(0, ios::beg);
}
int64_t bytestotal = std::accumulate(bytes, bytes+numThreads, static_cast<int64_t>(0));
std::ofstream ofs(filename, std::ios::binary | std::ios::app);
std::string str1 = std::to_string((double)bytestotal/(double)(1024 * 1024));
std::string str2 = " MB";
std::string OutputSize = str1 + str2;
printLog(OutputSize);
// pre-extend the file to its final size with a single trailing byte
ofs.seekp(bytestotal - 1);
ofs.write("", 1); // this will likely create a sparse file so the actual disks won't spin yet
ofs.close();
#pragma omp parallel
{
int ithread = omp_get_thread_num();
FILE *ffinal;
if ((ffinal = fopen(filename, "rb+")) == NULL) // then everyone fills it
{
fprintf(stderr, "File %s failed to open at thread %d\n", filename, ithread);
}
// this thread's slot starts after the buffers of all lower-numbered threads
int64_t bytesuntil = std::accumulate(bytes, bytes+ithread, static_cast<int64_t>(0));
fseek (ffinal, bytesuntil, SEEK_SET);
std::string text = vss[ithread].str();
fwrite(text.c_str(),1, bytes[ithread] ,ffinal);
fflush(ffinal);
fclose(ffinal);
}
delete [] bytes;
double timeoutputt = omp_get_wtime()-outputting;
return make_tuple(alignedpairs, alignedbases, totalreadlen, totaloutputt, totsuccbases, totfailbases, timeoutputt);
}
/**
* Sparse multithreaded GEMM.
**/
// Sparse multithreaded GEMM (hash-based), CPU path.  Forms the strictly
// lower triangular part of C = A*B in memory-bounded stages (symbolic
// estimate -> staged numeric multiply -> pairwise alignment + output).
// Fix: FreeMemory and RequiredMemory were logged as raw byte counts
// labelled " MB"; they are now converted to MB like AvailableRAM.
template <typename IT, typename NT, typename FT, typename MultiplyOperation, typename AddOperation>
void HashSpGEMM(const CSC<IT,NT>& A, const CSC<IT,NT>& B, MultiplyOperation multop, AddOperation addop, const readVector_& reads,
    FT& getvaluetype, char* filename, const BELLApars& b_pars, const double& ratiophi)
{
    double free_memory = estimateMemory(b_pars); // in bytes
    std::string str1 = std::to_string(free_memory / (1024 * 1024));
    std::string str2 = " MB";
    std::string AvailableRAM = str1 + str2;
    printLog(AvailableRAM);
    int numThreads = 1;
    #pragma omp parallel
    {
        numThreads = omp_get_num_threads();
    }
    // symbolic phase: per-column flop and nnz estimates, then prefix sums
    IT* flopC = estimateFLOP(A, B, true);
    IT* flopptr = prefixsum<IT>(flopC, B.cols, numThreads);
    IT flops = flopptr[B.cols];
    std::string FLOPs = std::to_string(flops);
    printLog(FLOPs);
    IT* colnnzC = estimateNNZ_Hash(A, B, flopC, true);
    IT* colptrC = prefixsum<IT>(colnnzC, B.cols, numThreads); // colptrC[i] = rolling sum of nonzeros in C[1...i]
    delete [] colnnzC;
    delete [] flopptr;
    delete [] flopC;
    IT nnzc = colptrC[B.cols];
    double compression_ratio = (double)flops / nnzc;
    uint64_t required_memory = safety_net * nnzc * (sizeof(FT)+sizeof(IT)); // required memory to form the output
    int stages = std::ceil((double) required_memory/ free_memory); // form output in stages
    uint64_t nnzcperstage = free_memory / (safety_net * (sizeof(FT)+sizeof(IT)));
    std::string nnzOutput = std::to_string(nnzc);
    std::string FreeMemory = std::to_string(free_memory / (1024 * 1024)) + " MB";
    std::string CompressionRatio = std::to_string(compression_ratio);
    std::string RequiredMemory = std::to_string(required_memory / (1024.0 * 1024.0)) + " MB";
    std::string RequiredStages = std::to_string(stages);
    printLog(nnzOutput);
    printLog(CompressionRatio);
    printLog(FreeMemory);
    printLog(RequiredMemory);
    printLog(RequiredStages);
    IT * colStart = new IT[stages+1]; // one array is enough to set stage boundaries
    colStart[0] = 0;
    for(int i = 1; i < stages; ++i) // colsPerStage is no longer fixed (helps with potential load imbalance)
    {
        // std::upper_bound returns an iterator pointing to the first element
        // in the range [first, last) that is greater than value, or last if no such element is found
        auto upper = std::upper_bound(colptrC, colptrC+B.cols+1, i*nnzcperstage );
        colStart[i] = upper - colptrC - 1; // we don't want the element that exceeds our budget, we want the one just before that
    }
    colStart[stages] = B.cols;
    for(int b = 0; b < stages; ++b)
    {
        double alnlenl = omp_get_wtime();
        vector<IT> * RowIdsofC = new vector<IT>[colStart[b+1]-colStart[b]]; // row ids for each column of C (bunch of cols)
        vector<FT> * ValuesofC = new vector<FT>[colStart[b+1]-colStart[b]]; // values for each column of C (bunch of cols)
        LocalSpGEMM(colStart[b], colStart[b+1], A, B, multop, addop, RowIdsofC, ValuesofC, colptrC, true);
        double alnlen2 = omp_get_wtime();
        std::string ColumnsRange = "[" + std::to_string(colStart[b]) + " - " + std::to_string(colStart[b+1]) + "]";
        printLog(ColumnsRange);
        std::string OverlapTime = std::to_string(alnlen2-alnlenl) + " seconds";
        printLog(OverlapTime);
        // combine the per-column vectors into flat rowids/values arrays
        IT endnz = colptrC[colStart[b+1]];
        IT begnz = colptrC[colStart[b]];
        IT * rowids = new IT[endnz-begnz];
        FT * values = new FT[endnz-begnz];
        for(IT i=colStart[b]; i<colStart[b+1]; ++i) // combine step
        {
            IT loccol = i-colStart[b];
            IT locnz = colptrC[i]-begnz;
            copy(RowIdsofC[loccol].begin(), RowIdsofC[loccol].end(), rowids + locnz);
            copy(ValuesofC[loccol].begin(), ValuesofC[loccol].end(), values + locnz);
        }
        delete [] RowIdsofC;
        delete [] ValuesofC;
        // GG: all parallelism moved to GPU; we can do better
        tuple<size_t, size_t, size_t, size_t, size_t, size_t, double> alignstats; // (alignedpairs, alignedbases, totalreadlen, outputted, alignedtrue, alignedfalse, timeoutputt)
        alignstats = RunPairWiseAlignments(colStart[b], colStart[b+1], begnz, colptrC, rowids, values, reads, filename, b_pars, ratiophi);
        if(!b_pars.skipAlignment)
        {
            double elapsed = omp_get_wtime()-alnlen2;
            double aligntime = elapsed-get<6>(alignstats); // subtracting outputting time
            std::string ColumnsRange = "[" + std::to_string(colStart[b]) + " - " + std::to_string(colStart[b+1]) + "]";
            printLog(ColumnsRange);
            std::string AlignmentTime = std::to_string(aligntime) + " seconds";
            printLog(AlignmentTime);
            std::string AlignmentRate = std::to_string((int)(static_cast<double>(get<1>(alignstats))/aligntime)) + " bases/second";
            printLog(AlignmentRate);
            std::string AverageReadLength = std::to_string((int)(static_cast<double>(get<2>(alignstats))/(2*get<0>(alignstats))));
            printLog(AverageReadLength);
            std::string PairsAligned = std::to_string(get<0>(alignstats));
            printLog(PairsAligned);
            std::string AverageLengthSuccessfulAlignment = std::to_string((int)(static_cast<double>(get<4>(alignstats))/get<3>(alignstats))) + " bps";
            printLog(AverageLengthSuccessfulAlignment);
            std::string AverageLengthFailedAlignment = std::to_string((int)(static_cast<double>(get<5>(alignstats)) / (get<0>(alignstats) - get<3>(alignstats)))) + " bps";
            printLog(AverageLengthFailedAlignment);
        }
        int LinesOutputted = get<3>(alignstats);
        printLog(LinesOutputted);
        std::string OutputtingTime = std::to_string(get<6>(alignstats)) + " seconds";
        printLog(OutputtingTime);
        delete [] rowids;
        delete [] values;
    } // for(int b = 0; b < stages; ++b)
    delete [] colptrC;
    delete [] colStart;
}
#else // #ifndef __NVCC__
// ======================================= //
// GPU Functions //
// ======================================= //
// GPU-path twin of PostAlignDecision: decide whether a LOGAN-aligned read
// pair passes the adaptive (Chernoff-based) or fixed score threshold; on
// success append one BELLA- or PAF-format line to myBatch and update the
// outputted counter.  Base counters are updated either way.
void PostAlignDecisionGPU(const loganResult& maxExtScore, const readType_& read1, const readType_& read2,
const BELLApars& b_pars, double ratiophi, int count, stringstream& myBatch, size_t& outputted,
size_t& numBasesAlignedTrue, size_t& numBasesAlignedFalse, bool& passed)
{
// returns a Logan::Seed object
SeedL maxseed = maxExtScore.seed;
// {begin/end}Position{V/H}: Returns the begin/end position of the seed in the query (vertical/horizontal direction)
// these four return seqan:Tposition objects
auto begpV = getBeginPositionV(maxseed);
auto endpV = getEndPositionV(maxseed);
auto begpH = getBeginPositionH(maxseed);
auto endpH = getEndPositionH(maxseed);
// get references for better naming
const string& seq1 = read1.seq; // H
const string& seq2 = read2.seq; // Vzw
unsigned short int read1len = seq1.length();
unsigned short int read2len = seq2.length();
// GG: divergence estimation
unsigned short int overlapLenV = endpV - begpV;
unsigned short int overlapLenH = endpH - begpH;
unsigned short int minLeft = min(begpV, begpH);
unsigned short int minRight = min(read2len - endpV, read1len - endpH);
unsigned short int ov = minLeft + minRight + (overlapLenV + overlapLenH) / 2;
// NOTE(review): normLen and minLen are computed but unused in this function
unsigned short int normLen = max(overlapLenV, overlapLenH);
unsigned short int minLen = min(overlapLenV, overlapLenH);
if(b_pars.fixedThreshold == -1)
{
// adaptive threshold: fraction of the expected score for an overlap of length ov
double mythreshold = (1 - b_pars.deltaChernoff) * (ratiophi * (double)ov);
if((double)maxExtScore.score >= mythreshold)
{
passed = true;
}
}
else if(maxExtScore.score >= b_pars.fixedThreshold) // GG: this is only useful for debugging
{
passed = true;
}
if(passed)
{
if(!b_pars.outputPaf) // BELLA output format
{
myBatch << read2.nametag << '\t' << read1.nametag << '\t' << count << '\t' << maxExtScore.score << '\t' << ov << '\t' << maxExtScore.strand << '\t' <<
begpV << '\t' << endpV << '\t' << read2len << '\t' << begpH << '\t' << endpH << '\t' << read1len << endl;
}
else
{
std::string pafstrand; // maxExtScore not modifiable
unsigned short int mapq = 255; // mapping quality (0-255; 255 for missing)
if(maxExtScore.strand == "n") pafstrand = "+";
else pafstrand = "-";
if(pafstrand == "-")
toOriginalCoordinates(begpH, endpH, read1len);
// PAF format is the output format used by minimap/minimap2: https://github.com/lh3/miniasm/blob/master/PAF.md
myBatch << read2.nametag << '\t' << read2len << '\t' << begpV << '\t' << endpV << '\t' << pafstrand << '\t' <<
read1.nametag << '\t' << read1len << '\t' << begpH << '\t' << endpH << '\t' << maxExtScore.score << '\t' << ov << '\t' << mapq << endl;
}
++outputted;
numBasesAlignedTrue += (endpV-begpV);
}
else
{
numBasesAlignedFalse += (endpV-begpV);
}
}
// (unsigned int, unsigned int, unsigned int, unsigned int *, unsigned int *, spmatPtr_ *,
// const readVector_, const BELLApars, char *, double)
// GPU path: gather all candidate pairs in columns [start, end) into batch
// vectors (sequences + seeds), run one batched LOGAN alignment, then replay
// the pairs in the same order for threshold decisions and output.  The loops
// are deliberately sequential so the replay order matches the batch order.
// Returns (alignedpairs, alignedbases, totalreadlen, totaloutputt,
// totsuccbases, totfailbases, timeoutputt).
template <typename IT, typename FT>
std::tuple<uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, double>
RunPairWiseAlignmentsGPU(IT start, IT end, IT offset, IT * colptrC, IT * rowids, FT * values, const readVector_& reads,
const BELLApars& b_pars, char* filename, double ratiophi)
{
stringstream ss;
vector<string> seq1s;
vector<string> seq2s;
vector<SeedL> seeds;
vector<loganResult> maxExtScoreL;
uint64_t outputted = 0;
int count = 0;
//#pragma omp parallel for schedule(dynamic) // keep the order for the post evaluation code
for(IT j = start; j < end; ++j) // accumulate sequences for GPU batch alignment
{
count++;
for (IT i = colptrC[j]; i < colptrC[j+1]; ++i)
{
unsigned int rid = rowids[i-offset]; // row id
unsigned int cid = j; // column id
const string& seq1 = reads[rid].seq; // get reference for readability
const string& seq2 = reads[cid].seq; // get reference for readability
unsigned short int seq1len = seq1.length();
unsigned short int seq2len = seq2.length();
spmatPtr_ val = values[i-offset];
if(!b_pars.skipAlignment) // fix -z to not print
{
loganResult localRes;
// GG: number of matching kmer into the majority voted bin
unsigned short int matches = val->chain();
pair<int, int> kmer = val->choose();
int i = kmer.first, j = kmer.second;
std::string strand = "n";
SeedL seed(i, j, i + b_pars.kmerSize, j + b_pars.kmerSize);
std::string seedH = seq1.substr(getBeginPositionH(seed), b_pars.kmerSize);
std::string seedV = seq2.substr(getBeginPositionV(seed), b_pars.kmerSize);
std::string seedHcpy = reversecomplement(seedH);
std::string cpyseq1(seq1);
if(seedHcpy == seedV)
{
// seed matches on the opposite strand: reverse-complement seq1
// and remap the seed onto the reversed coordinates
strand = "c";
std::reverse(std::begin(cpyseq1), std::end(cpyseq1));
std::transform(std::begin(cpyseq1), std::end(cpyseq1), std::begin(cpyseq1), complementbase);
setBeginPositionH(seed, seq1len - i - b_pars.kmerSize);
setBeginPositionV(seed, j);
setEndPositionH(seed, seq1len - i);
setEndPositionV(seed, j + b_pars.kmerSize);
}
localRes.strand = strand;
seeds.push_back(seed);
seq2s.push_back(seq2);
seq1s.push_back(cpyseq1);
maxExtScoreL.push_back(localRes);
}
else // if skipAlignment == false do alignment, else save just some info on the pair to file
{
pair<int, int> kmer = val->choose();
int i = kmer.first, j = kmer.second;
int overlap = overlapop(reads[rid].seq, reads[cid].seq, i, j, b_pars.kmerSize);
// vss[ithread] << reads[cid].nametag << '\t' << reads[rid].nametag << '\t' << val->count << '\t' <<
// seq2len << '\t' << seq1len << endl;
ss << reads[cid].nametag << '\t' << reads[rid].nametag << '\t' << val->count << '\t' <<
overlap << '\t' << seq2len << '\t' << seq1len << endl;
++outputted;
}
}
}
uint64_t alignedpairs = 0;
uint64_t alignedbases = 0;
uint64_t totalreadlen = 0;
uint64_t totaloutputt = 0;
uint64_t totsuccbases = 0;
uint64_t totfailbases = 0;
if(!b_pars.skipAlignment) // fix -z to not print
{
std::string AlignmentGPU = "Started";
printLog(AlignmentGPU);
// batched x-drop alignment on the GPU; results land in maxExtScoreL
alignLogan(seq1s, seq2s, seeds, b_pars, maxExtScoreL);
AlignmentGPU = "Completed";
printLog(AlignmentGPU);
uint64_t idx = 0;
// no parallelism to keep same order of pairs in alignment
for(IT j = start; j < end; ++j) // for (end-start) columns of A^T A (one block)
{
// uint64_t numAlignmentsThread = 0;
// uint64_t numBasesAlignedThread = 0;
// uint64_t readLengthsThread = 0;
// uint64_t numBasesAlignedTrue = 0;
// uint64_t numBasesAlignedFalse = 0;
for (IT i = colptrC[j]; i < colptrC[j+1]; ++i) // all nonzeros in that column of A^T A
{
unsigned int rid = rowids[i-offset]; // row id
unsigned int cid = j; // column id
const string& seq1 = reads[rid].seq; // get reference for readability
const string& seq2 = reads[cid].seq; // get reference for readability
unsigned short int seq1len = seq1.length();
unsigned short int seq2len = seq2.length();
spmatPtr_ val = values[i-offset];
alignedpairs++;
totalreadlen = totalreadlen + seq1len + seq2len;
// readLengthsThread = readLengthsThread + seq1len + seq2len;
bool passed = false;
loganResult maxExtScore = maxExtScoreL[idx];
PostAlignDecisionGPU(maxExtScore, reads[rid], reads[cid], b_pars, ratiophi, val->count,
ss, totaloutputt, totsuccbases, totfailbases, passed);
idx++; // pairs aligned
// numBasesAlignedThread += getEndPositionV(maxExtScore.seed) - getBeginPositionV(maxExtScore.seed);
alignedbases += getEndPositionV(maxExtScore.seed) - getBeginPositionV(maxExtScore.seed);
} // all nonzeros in that column of A^T A
// GG: no need for multithreaded style here
// alignedpairs += numAlignmentsThread;
// alignedbases += numBasesAlignedThread;
// totalreadlen += readLengthsThread;
// totaloutputt += outputted;
// totsuccbases += numBasesAlignedTrue;
// totfailbases += numBasesAlignedFalse;
// printLog(totsuccbases);
// printLog(totfailbases);
} // all columns from start...end (omp for loop)
}
double outputting = omp_get_wtime();
int64_t bytestotal;
ss.seekg(0, ios::end);
bytestotal = ss.tellg();
ss.seekg(0, ios::beg);
std::ofstream ofs(filename, std::ios::binary | std::ios::app);
std::string str1 = std::to_string((double)bytestotal/(double)(1024 * 1024));
std::string str2 = " MB";
std::string OutputSize = str1 + str2;
printLog(OutputSize);
// pre-extend the file to its final size with a single trailing byte
ofs.seekp(bytestotal - 1);
ofs.write("", 1); // this will likely create a sparse file so the actual disks won't spin yet
ofs.close();
FILE *ffinal;
if ((ffinal = fopen(filename, "rb+")) == NULL) // then everyone fills it
{
fprintf(stderr, "File %s failed to open\n", filename);
}
// int64_t bytesuntil = std::accumulate(bytes, bytes+ithread, static_cast<int64_t>(0));
// NOTE(review): this seeks to bytestotal before writing bytestotal bytes,
// whereas the CPU version seeks each writer to the cumulative offset of the
// preceding buffers (0 for the first). Verify the intended file offset.
fseek (ffinal , bytestotal , SEEK_SET);
// std::string text = vss[ithread].str();
std::string text = ss.str();
fwrite(text.c_str(), 1, bytestotal, ffinal);
fflush(ffinal);
fclose(ffinal);
double timeoutputt = omp_get_wtime()-outputting;
return std::make_tuple(alignedpairs, alignedbases, totalreadlen, totaloutputt, totsuccbases, totfailbases, timeoutputt);
}
/**
* Sparse multithreaded GEMM.
**/
// Staged sparse GEMM (overlap detection, C = A^T A) followed by GPU batch
// alignment of the candidate read pairs. The output is formed in 'stages'
// chunks of columns so that each chunk fits in the RAM budget estimated by
// estimateMemory(); each chunk is multiplied, flattened, aligned on the GPU
// via RunPairWiseAlignmentsGPU, and released before the next chunk.
// getvaluetype is only used to fix the output value type FT.
// NOTE(review): ownership — colptrC/colStart and the per-stage rowids/values
// are allocated and freed here; nothing is handed to the caller.
template <typename IT, typename NT, typename FT, typename MultiplyOperation, typename AddOperation>
void HashSpGEMMGPU(const CSC<IT,NT> & A, const CSC<IT,NT> & B, MultiplyOperation multop, AddOperation addop, const readVector_& reads,
FT& getvaluetype, char* filename, const BELLApars& b_pars, const double& ratiophi)
{
double free_memory = estimateMemory(b_pars);
std::string str1 = std::to_string(free_memory / (1024 * 1024));
std::string str2 = " MB";
std::string AvailableRAM = str1 + str2;
printLog(AvailableRAM);
int numThreads = 1;
#pragma omp parallel
{
numThreads = omp_get_num_threads();
}
// estimate the multiply work (flops) and output nonzeros to plan staging
IT* flopC = estimateFLOP(A, B, true);
IT* flopptr = prefixsum<IT>(flopC, B.cols, numThreads);
IT flops = flopptr[B.cols];
std::string FLOPs = std::to_string(flops);
printLog(FLOPs);
IT* colnnzC = estimateNNZ_Hash(A, B, flopC, true);
IT* colptrC = prefixsum<IT>(colnnzC, B.cols, numThreads); // colptrC[i] = rolling sum of nonzeros in C[1...i]
delete [] colnnzC;
delete [] flopptr;
delete [] flopC;
IT nnzc = colptrC[B.cols];
double compression_ratio = (double)flops / nnzc;
uint64_t required_memory = safety_net * nnzc * (sizeof(FT)+sizeof(IT)); // required memory to form the output
int stages = std::ceil((double) required_memory/ free_memory); // form output in stages
uint64_t nnzcperstage = free_memory / (safety_net * (sizeof(FT)+sizeof(IT)));
std::string nnzOutput = std::to_string(nnzc);
std::string FreeMemory = std::to_string(free_memory) + " MB";
std::string CompressionRatio = std::to_string(compression_ratio);
std::string RequiredMemory = std::to_string(required_memory) + " MB";
std::string RequiredStages = std::to_string(stages);
printLog(nnzOutput);
printLog(CompressionRatio);
printLog(FreeMemory);
printLog(RequiredMemory);
printLog(RequiredStages);
IT * colStart = new IT[stages+1]; // one array is enough to set stage boundaries
colStart[0] = 0;
for(int i = 1; i < stages; ++i) // colsPerStage is no longer fixed (helps with potential load imbalance)
{
// std::upper_bound returns an iterator pointing to the first element
// in the range [first, last) that is greater than value, or last if no such element is found
auto upper = std::upper_bound(colptrC, colptrC+B.cols+1, i*nnzcperstage );
colStart[i] = upper - colptrC - 1; // we don't want the element that exceeds our budget, we want the one just before that
}
colStart[stages] = B.cols;
for(int b = 0; b < stages; ++b)
{
double alnlenl = omp_get_wtime();
vector<IT> * RowIdsofC = new vector<IT>[colStart[b+1]-colStart[b]]; // row ids for each column of C (bunch of cols)
vector<FT> * ValuesofC = new vector<FT>[colStart[b+1]-colStart[b]]; // values for each column of C (bunch of cols)
LocalSpGEMM(colStart[b], colStart[b+1], A, B, multop, addop, RowIdsofC, ValuesofC, colptrC, true);
double alnlen2 = omp_get_wtime();
std::string ColumnsRange = "[" + std::to_string(colStart[b]) + " - " + std::to_string(colStart[b+1]) + "]";
printLog(ColumnsRange);
std::string OverlapTime = std::to_string(alnlen2-alnlenl) + " seconds";
printLog(OverlapTime);
IT endnz = colptrC[colStart[b+1]];
IT begnz = colptrC[colStart[b]];
// flatten the per-column vectors into contiguous CSC-style arrays,
// offsets taken from the precomputed colptrC prefix sums
IT * rowids = new IT[endnz-begnz];
FT * values = new FT[endnz-begnz];
for(IT i=colStart[b]; i<colStart[b+1]; ++i) // combine step
{
IT loccol = i-colStart[b];
IT locnz = colptrC[i]-begnz;
copy(RowIdsofC[loccol].begin(), RowIdsofC[loccol].end(), rowids + locnz);
copy(ValuesofC[loccol].begin(), ValuesofC[loccol].end(), values + locnz);
}
delete [] RowIdsofC;
delete [] ValuesofC;
// GG: all paralelism moved to GPU we can do better
std::tuple<uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, double> alignstats; // (alignedpairs, alignedbases, totalreadlen, outputted, alignedtrue, alignedfalse, timeoutputt)
alignstats = RunPairWiseAlignmentsGPU(colStart[b], colStart[b+1], begnz, colptrC, rowids, values, reads, b_pars, filename, ratiophi);
if(!b_pars.skipAlignment)
{
double elapsed = omp_get_wtime()-alnlen2;
double aligntime = elapsed-get<6>(alignstats); // substracting outputting time
std::string ColumnsRange = "[" + std::to_string(colStart[b]) + " - " + std::to_string(colStart[b+1]) + "]";
printLog(ColumnsRange);
std::string AlignmentTime = std::to_string(aligntime) + " seconds";
printLog(AlignmentTime);
std::string AlignmentRate = std::to_string((int)(static_cast<double>(get<1>(alignstats))/aligntime)) + " bases/second";
printLog(AlignmentRate);
std::string AverageReadLength = std::to_string((int)(static_cast<double>(get<2>(alignstats))/(2*get<0>(alignstats))));
printLog(AverageReadLength);
std::string PairsAligned = std::to_string(get<0>(alignstats));
printLog(PairsAligned);
std::string AverageLengthSuccessfulAlignment = std::to_string((int)(static_cast<double>(get<4>(alignstats))/get<3>(alignstats))) + " bps";
printLog(AverageLengthSuccessfulAlignment);
std::string AverageLengthFailedAlignment = std::to_string((int)(static_cast<double>(get<5>(alignstats)) / (get<0>(alignstats) - get<3>(alignstats)))) + " bps";
printLog(AverageLengthFailedAlignment);
}
int LinesOutputted = get<3>(alignstats);
printLog(LinesOutputted);
std::string OutputtingTime = std::to_string(get<6>(alignstats)) + " seconds";
printLog(OutputtingTime);
delete [] rowids;
delete [] values;
} //for(int b = 0; b < states; ++b)
delete [] colptrC;
delete [] colStart;
}
#endif // #ifdef __NVCC__
|
utils.c | #include "utils.h"
/*
void merge_scores(int * scores, char ** titles, unsigned long int size) {
unsigned long int i1 = 0;
unsigned long int i2 = size / 2;
unsigned long int it = 0;
// allocate memory for temporary buffers
char ** tmp2 = (char **) malloc(size*sizeof(char *));
int * tmp3 = (int *) malloc (size*sizeof(int));
while(i1 < size/2 && i2 < size) {
if (scores[i1] > scores[i2]) {
tmp2[it] = titles[i1];
tmp3[it] = scores[i1];
i1++;
}
else {
tmp2[it] = titles[i2];
tmp3[it] = scores[i2];
i2 ++;
}
it ++;
}
while (i1 < size/2) {
tmp2[it] = titles[i1];
tmp3[it] = scores[i1];
i1++;
it++;
}
while (i2 < size) {
tmp2[it] = titles[i2];
tmp3[it] = scores[i2];
i2++;
it++;
}
memcpy(titles, tmp2, size*sizeof(char *));
memcpy(scores, tmp3, size*sizeof(int));
free(tmp2);
free(tmp3);
}
void mergesort_scores_serial(int * scores, char ** titles, unsigned long int size) {
int tmp_score;
char * tmp_seq;
if (size == 2) {
if (scores[0] <= scores[1]) {
// swap scores
tmp_score = scores[0];
scores[0] = scores[1];
scores[1] = tmp_score;
// swap titles
tmp_seq = titles[0];
titles[0] = titles[1];
titles[1] = tmp_seq;
}
} else {
if (size > 2){
mergesort_scores_serial(scores, titles, size/2);
mergesort_scores_serial(scores + size/2, titles + size/2, size - size/2);
merge_scores(scores, titles, size);
}
}
}
void sort_scores (int * scores, char ** titles, unsigned long int size, int threads) {
if ( threads == 1) {
mergesort_scores_serial(scores, titles, size);
}
else if (threads > 1) {
#pragma omp parallel sections num_threads(threads)
{
#pragma omp section
sort_scores(scores, titles, size/2, threads/2);
#pragma omp section
sort_scores(scores + size/2, titles + size/2, size-size/2, threads-threads/2);
}
merge_scores(scores, titles, size);
} // threads > 1
}
*/
// Wall time
/* Return the current wall-clock time in seconds since the Unix epoch,
 * with microsecond resolution (gettimeofday). */
double dwalltime()
{
	struct timeval now;

	gettimeofday(&now, NULL);
	/* whole seconds plus the fractional microseconds part */
	return now.tv_sec + now.tv_usec / 1000000.0;
}
|
GB_unop__asinh_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__asinh_fp32_fp32)
// op(A') function: GB (_unop_tran__asinh_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = asinhf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = asinhf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = asinhf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ASINH || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = asinhf (Ax [p]) for every entry of A.
// When Ab is non-NULL, A is held as a bitmap and entries with Ab [p] == 0
// are skipped. Returns GrB_NO_VALUE when this kernel is compiled out.
GrB_Info GB (_unop_apply__asinh_fp32_fp32)
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense case: all anz entries are present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = asinhf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ; // entry not present in the bitmap
float aij = Ax [p] ;
float z = aij ;
Cx [p] = asinhf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = asinhf (cast (A')): transpose, typecast, and apply the unary op.
// The heavy lifting is done by the shared template GB_unop_transpose.c,
// specialized through the GB_* macros defined above.
GrB_Info GB (_unop_tran__asinh_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cneighbors.c | /***
Neighbourhood helper functions accelerated with parallelised C
---------------------------------------------------------------
Copyright (c) 2017 Johannes Buchner
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***/
#include<stdio.h>
#include<stdlib.h>
#include<assert.h>
#include<math.h>
#ifdef PARALLEL
#include<omp.h>
#endif
#define IFVERBOSE if(0)
#define IFDEBUG if(0)
#define adouble double
#define bdouble double
#define sqr(x) (pow(x,2))
/*
 * For each of the nsamples ndim-dimensional points in xxp, compute the
 * Euclidean distance to its nearest neighbour, and return the largest of
 * those nearest-neighbour distances.
 *
 * Fixes over the previous version:
 *  - the per-sample scratch array was a VLA sized by the caller-controlled
 *    nsamples, risking stack overflow for large inputs; it is now heap
 *    allocated (stdlib.h is already included by this file);
 *  - nsamples <= 0 no longer reads uninitialized/out-of-bounds memory.
 * Returns -1 on allocation failure (distances are never negative, so the
 * sentinel is out-of-band); returns 0 for an empty input.
 */
double most_distant_nearest_neighbor(
	const void * xxp, int nsamples, int ndim
) {
	const adouble * xx = (const adouble*) xxp;
	if (nsamples <= 0)
		return 0;
	double * nearest_ds = (double *) malloc(nsamples * sizeof(double));
	if (nearest_ds == NULL)
		return -1;
	IFVERBOSE {
		for (int i = 0; i < nsamples; i++) { // one sample at a time
			printf("%d: ", i);
			for (int k = 0; k < ndim; k++) {
				printf("%e\t", xx[i*ndim + k]);
			}
			printf("\n");
		}
	}
	#ifdef PARALLEL
	#pragma omp parallel for
	#endif
	for (int i = 0; i < nsamples; i++) { // one sample at a time
		/* squared distance to the closest other sample */
		double nearest_d = 1e300;
		for (int j = 0; j < nsamples; j++) {
			if (j != i) {
				double d = 0;
				for (int k = 0; k < ndim; k++) {
					d += sqr(xx[i*ndim + k] - xx[j*ndim + k]);
				}
				if (d < nearest_d) {
					nearest_d = d;
				}
			}
		}
		IFVERBOSE printf("%d: %f\n", i, sqrt(nearest_d));
		nearest_ds[i] = sqrt(nearest_d);
	}
	/* serial max-reduction over the per-sample nearest distances */
	double furthest_d = nearest_ds[0];
	for (int i = 1; i < nsamples; i++) {
		if (nearest_ds[i] > furthest_d)
			furthest_d = nearest_ds[i];
	}
	free(nearest_ds);
	IFVERBOSE printf("result: %f\n", furthest_d);
	return furthest_d;
}
/*
 * Return 1 if any of the nsamples ndim-dimensional points in xxp lies
 * strictly within maxdistance (Euclidean) of the single point yp,
 * otherwise 0. Stops at the first hit.
 */
int is_within_distance_of(
	const void * xxp, int nsamples, int ndim, double maxdistance, const void * yp
) {
	const adouble * points = (const adouble*) xxp;
	const adouble * target = (const adouble*) yp;
	for (int s = 0; s < nsamples; s++) {
		const adouble * row = points + s * ndim;
		double dist2 = 0;
		for (int dim = 0; dim < ndim; dim++) {
			dist2 += sqr(row[dim] - target[dim]);
		}
		if (sqrt(dist2) < maxdistance)
			return 1; /* early out: one match is enough */
	}
	return 0;
}
/*
 * For each of the nothers query points in yyp, count how many of the
 * nsamples points in xxp lie strictly within maxdistance (Euclidean).
 * Counts are accumulated into the caller-supplied double buffer outp
 * (assumed pre-initialised by the caller — TODO confirm); when countmax
 * is positive, counting for a query stops once its count reaches countmax.
 * Always returns 0.
 */
int count_within_distance_of(
	const void * xxp, int nsamples, int ndim, double maxdistance,
	const void * yyp, int nothers, void * outp, const int countmax
) {
	const adouble * samples = (const adouble*) xxp;
	const adouble * queries = (const adouble*) yyp;
	double * counts = (double*) outp;
	for (int q = 0; q < nothers; q++) {
		for (int s = 0; s < nsamples; s++) {
			double dist2 = 0;
			for (int k = 0; k < ndim; k++) {
				dist2 += sqr(samples[s*ndim + k] - queries[q*ndim + k]);
			}
			if (sqrt(dist2) < maxdistance) {
				counts[q]++;
				/* optional cap: stop scanning samples for this query */
				if (countmax > 0 && counts[q] >= countmax) {
					break;
				}
			}
		}
	}
	return 0;
}
/**
* xxp are double points (nsamples x ndim)
* choicep is whether the point is selected in the bootstrap round (nsamples x nbootstraps)
*/
/*
 * Bootstrapped estimate of the most distant nearest neighbour.
 * xxp holds nsamples ndim-dimensional points; choicep (nsamples x
 * nbootstraps, adouble) marks which points were selected in each bootstrap
 * round (nonzero = chosen). For each round b, every point NOT chosen is
 * matched to its nearest chosen neighbour, and the largest such distance is
 * recorded; the maximum over all rounds is returned.
 *
 * Fix: the previous version reduced over nearest_ds starting at i = 1, so
 * the nearest-neighbour distance of sample 0 was silently ignored whenever
 * sample 0 was left out of a round (compare most_distant_nearest_neighbor,
 * which seeds the reduction with element 0). The max is now folded inline
 * over ALL left-out samples, which also removes the per-round VLA.
 */
double bootstrapped_maxdistance(
	const void * xxp,
	int nsamples, int ndim,
	const void * choicep,
	int nbootstraps
) {
	const adouble * xx = (const adouble*) xxp;
	const adouble * chosen = (const adouble*) choicep;
	double furthest_ds[nbootstraps];
	double furthest_d_bs;
	#ifdef PARALLEL
	#pragma omp parallel for
	#endif
	for(int b = 0; b < nbootstraps; b++) {
		double furthest_d = 0;
		for (int i = 0; i < nsamples; i++) {
			if (chosen[i*nbootstraps + b] != 0) continue; /* only left-out points */
			double nearest_d = 1e300; /* squared distance to nearest chosen point */
			for (int j = 0; j < nsamples; j++) {
				if (chosen[j*nbootstraps + b] == 0) continue; /* only chosen points */
				double d = 0;
				for (int k = 0; k < ndim; k++) {
					d += sqr(xx[i*ndim + k] - xx[j*ndim + k]);
				}
				if (d < nearest_d) {
					nearest_d = d;
				}
			}
			double nearest = sqrt(nearest_d);
			if (nearest > furthest_d)
				furthest_d = nearest;
		}
		furthest_ds[b] = furthest_d;
	}
	/* overall maximum across bootstrap rounds */
	furthest_d_bs = furthest_ds[0];
	for (int i = 1; i < nbootstraps; i++) {
		if (furthest_ds[i] > furthest_d_bs)
			furthest_d_bs = furthest_ds[i];
	}
	IFVERBOSE printf("result: %f\n", furthest_d_bs);
	return furthest_d_bs;
}
|
simple.h | #pragma once
#ifndef BRONKERBOSCHSIMPLE_H
#define BRONKERBOSCHSIMPLE_H
#include "../general.h"
namespace BkSimple
{
//TODO: Using new local Variables since DISABLE_COPY restrict re assignments
//===> Confirm with Jakub that this doesn't harm performance
//template <typename Set, typename SGraph>
template <class SGraph, class Set = typename SGraph::Set>
void bronKerboschRec(Set R, Set P, Set X, std::vector<Set> &sol, const SGraph &graph)
{
//NOTE: At the moment, this is the only place where in place operation is really needed
if (P.cardinality() == 0 && X.cardinality() == 0)
{
#ifdef BK_COUNT
#pragma omp atomic
BK_CLIQUE_COUNTER++; //initialize counter
#endif
#ifdef MINEBENCH_TEST
sol.push_back(std::move(R));
#endif
}
else
{
auto iterator = P.begin();
while (!P.cardinality() == 0 && iterator != P.end())
{
auto v = *iterator;
auto &neighs = graph.out_neigh(v);
bronKerboschRec(R.union_with(v), P.intersect(neighs), X.intersect(neighs), sol, graph);
P.difference_inplace(v);
X.union_inplace(v);
if (!P.cardinality() == 0)
{
iterator = P.begin();
}
}
}
}
template <class SGraph, class Set = typename SGraph::Set>
std::vector<Set> mce(const CSRGraph &graph)
{
#ifdef BK_COUNT
BK_CLIQUE_COUNTER = 0; //initialize counter
#endif
size_t size = graph.num_nodes();
std::vector<Set> sol = {};
bronKerboschRec(Set(), Set::Range(size), Set(), sol, SGraph::FromCGraph(graph));
return sol;
}
} // namespace BkSimple
#endif /*BRONKERBOSCHSIMPLE_H*/ |
GB_unop__abs_int8_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__abs_int8_int8
// op(A') function: GB_unop_tran__abs_int8_int8
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CAST(z, aij) \
int8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = aij ; \
Cx [pC] = GB_IABS (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_IABS (Ax [p]) for every entry of A.
// When Ab is non-NULL, A is held as a bitmap and entries with Ab [p] == 0
// are skipped. Returns GrB_NO_VALUE when this kernel is compiled out.
GrB_Info GB_unop_apply__abs_int8_int8
(
int8_t *Cx, // Cx and Ax may be aliased
const int8_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense case: all anz entries are present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
int8_t z = aij ;
Cx [p] = GB_IABS (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ; // entry not present in the bitmap
int8_t aij = Ax [p] ;
int8_t z = aij ;
Cx [p] = GB_IABS (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = GB_IABS (cast (A')): transpose, typecast, and apply the unary op.
// The heavy lifting is done by the shared template GB_unop_transpose.c,
// specialized through the GB_* macros defined above.
GrB_Info GB_unop_tran__abs_int8_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
archive_blake2sp_ref.c | /*
BLAKE2 reference source code package - reference C implementations
Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the
terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
your option. The terms of these licenses can be found at:
- CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
- OpenSSL license : https://www.openssl.org/source/license.html
- Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
More information about the BLAKE2 hash function can be found at
https://blake2.net.
*/
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "archive_blake2.h"
#include "archive_blake2_impl.h"
#define PARALLELISM_DEGREE 8
/*
blake2sp_init_param defaults to setting the expecting output length
from the digest_length parameter block field.
In some cases, however, we do not want this, as the output length
of these instances is given by inner_length instead.
*/
/* Initialise a leaf instance from a parameter block, then override the
 * state's expected output length with P->inner_length: leaves produce
 * intermediate digests whose size is fixed by inner_length, not by the
 * user-requested digest_length (see comment above). */
static int blake2sp_init_leaf_param( blake2s_state *S, const blake2s_param *P )
{
  const int rc = blake2s_init_param( S, P );
  S->outlen = P->inner_length;
  return rc;
}
/* Build the parameter block for leaf number `offset` of the blake2sp tree
 * (fanout = PARALLELISM_DEGREE, depth 2, node_depth 0) and initialise S
 * with it. Returns the result of blake2s_init_param (negative on error). */
static int blake2sp_init_leaf( blake2s_state *S, size_t outlen, size_t keylen, uint32_t offset )
{
blake2s_param P[1];
P->digest_length = (uint8_t)outlen;
P->key_length = (uint8_t)keylen;
P->fanout = PARALLELISM_DEGREE;
P->depth = 2;
store32( &P->leaf_length, 0 );
store32( &P->node_offset, offset ); /* leaf index distinguishes the 8 lanes */
store16( &P->xof_length, 0 );
P->node_depth = 0;
P->inner_length = BLAKE2S_OUTBYTES; /* leaves emit full-size inner digests */
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
return blake2sp_init_leaf_param( S, P );
}
/* Build the parameter block for the root node of the blake2sp tree
 * (node_depth 1, node_offset 0) and initialise S with it.
 * Returns the result of blake2s_init_param (negative on error). */
static int blake2sp_init_root( blake2s_state *S, size_t outlen, size_t keylen )
{
blake2s_param P[1];
P->digest_length = (uint8_t)outlen;
P->key_length = (uint8_t)keylen;
P->fanout = PARALLELISM_DEGREE;
P->depth = 2;
store32( &P->leaf_length, 0 );
store32( &P->node_offset, 0 );
store16( &P->xof_length, 0 );
P->node_depth = 1; /* root sits one level above the leaves */
P->inner_length = BLAKE2S_OUTBYTES;
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
return blake2s_init_param( S, P );
}
/* Initialise an unkeyed blake2sp state producing an outlen-byte digest:
 * one root instance plus PARALLELISM_DEGREE leaf instances.
 * Returns 0 on success, -1 on invalid outlen or init failure. */
int blake2sp_init( blake2sp_state *S, size_t outlen )
{
size_t i;
if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
memset( S->buf, 0, sizeof( S->buf ) );
S->buflen = 0;
S->outlen = outlen;
if( blake2sp_init_root( S->R, outlen, 0 ) < 0 )
return -1;
for( i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2sp_init_leaf( S->S[i], outlen, 0, (uint32_t)i ) < 0 ) return -1;
/* the root and the last leaf are each the last node of their tree level */
S->R->last_node = 1;
S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
return 0;
}
/* Initialise a keyed blake2sp state: same layout as blake2sp_init, then
 * feed every leaf one zero-padded key block, per the BLAKE2 keying rule.
 * Returns 0 on success, -1 on invalid outlen/key or init failure. */
int blake2sp_init_key( blake2sp_state *S, size_t outlen, const void *key, size_t keylen )
{
size_t i;
if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
if( !key || !keylen || keylen > BLAKE2S_KEYBYTES ) return -1;
memset( S->buf, 0, sizeof( S->buf ) );
S->buflen = 0;
S->outlen = outlen;
if( blake2sp_init_root( S->R, outlen, keylen ) < 0 )
return -1;
for( i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2sp_init_leaf( S->S[i], outlen, keylen, (uint32_t)i ) < 0 ) return -1;
/* the root and the last leaf are each the last node of their tree level */
S->R->last_node = 1;
S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
{
uint8_t block[BLAKE2S_BLOCKBYTES];
memset( block, 0, BLAKE2S_BLOCKBYTES );
memcpy( block, key, keylen );
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S->S[i], block, BLAKE2S_BLOCKBYTES );
secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
}
return 0;
}
/* Absorb inlen bytes: input is striped across the PARALLELISM_DEGREE leaves
 * in BLAKE2S_BLOCKBYTES-sized blocks (leaf i takes blocks i, i+8, i+16, ...).
 * A partial stripe is buffered in S->buf for the next call / finalisation.
 * Always returns 0. */
int blake2sp_update( blake2sp_state *S, const void *pin, size_t inlen )
{
const unsigned char * in = (const unsigned char *)pin;
size_t left = S->buflen;
size_t fill = sizeof( S->buf ) - left;
size_t i;
/* first complete any previously buffered partial stripe */
if( left && inlen >= fill )
{
memcpy( S->buf + left, in, fill );
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, BLAKE2S_BLOCKBYTES );
in += fill;
inlen -= fill;
left = 0;
}
/* one lane per leaf: OpenMP threads when available, serial loop otherwise */
#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
{
#if defined(_OPENMP)
size_t i = omp_get_thread_num();
#endif
size_t inlen__ = inlen;
const unsigned char *in__ = ( const unsigned char * )in;
in__ += i * BLAKE2S_BLOCKBYTES; /* lane i starts at its own block */
while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
{
blake2s_update( S->S[i], in__, BLAKE2S_BLOCKBYTES );
in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
}
}
/* buffer the trailing partial stripe */
in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES );
inlen %= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
if( inlen > 0 )
memcpy( S->buf + left, in, inlen );
S->buflen = left + inlen;
return 0;
}
/* Finalise: flush each leaf's remaining buffered bytes, collect the eight
 * leaf digests, feed them to the root, and write the final S->outlen-byte
 * digest to out. Returns -1 if out is NULL or too small. */
int blake2sp_final( blake2sp_state *S, void *out, size_t outlen )
{
uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
size_t i;
if(out == NULL || outlen < S->outlen) {
return -1;
}
for( i = 0; i < PARALLELISM_DEGREE; ++i )
{
/* leaf i owns the i-th block of the buffered partial stripe */
if( S->buflen > i * BLAKE2S_BLOCKBYTES )
{
size_t left = S->buflen - i * BLAKE2S_BLOCKBYTES;
if( left > BLAKE2S_BLOCKBYTES ) left = BLAKE2S_BLOCKBYTES;
blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, left );
}
blake2s_final( S->S[i], hash[i], BLAKE2S_OUTBYTES );
}
/* root absorbs the concatenated leaf digests */
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S->R, hash[i], BLAKE2S_OUTBYTES );
return blake2s_final( S->R, out, S->outlen );
}
/* One-shot blake2sp: hash inlen bytes of in (optionally keyed) into an
 * outlen-byte digest at out. Equivalent to init(_key)/update/final but
 * processes the whole input in a single striped pass.
 * Returns 0 on success, -1 on invalid parameters or init failure. */
int blake2sp( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen )
{
uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
blake2s_state S[PARALLELISM_DEGREE][1];
blake2s_state FS[1];
size_t i;
/* Verify parameters */
if ( NULL == in && inlen > 0 ) return -1;
if ( NULL == out ) return -1;
if ( NULL == key && keylen > 0) return -1;
if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
if( keylen > BLAKE2S_KEYBYTES ) return -1;
for( i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2sp_init_leaf( S[i], outlen, keylen, (uint32_t)i ) < 0 ) return -1;
S[PARALLELISM_DEGREE - 1]->last_node = 1; /* mark last node */
if( keylen > 0 )
{
/* keyed mode: every leaf absorbs one zero-padded key block first */
uint8_t block[BLAKE2S_BLOCKBYTES];
memset( block, 0, BLAKE2S_BLOCKBYTES );
memcpy( block, key, keylen );
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S[i], block, BLAKE2S_BLOCKBYTES );
secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
}
/* stripe the input across the leaves: lane i takes blocks i, i+8, ... */
#if defined(_OPENMP)
#pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
{
#if defined(_OPENMP)
size_t i = omp_get_thread_num();
#endif
size_t inlen__ = inlen;
const unsigned char *in__ = ( const unsigned char * )in;
in__ += i * BLAKE2S_BLOCKBYTES;
while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
{
blake2s_update( S[i], in__, BLAKE2S_BLOCKBYTES );
in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
}
/* tail: lane i may own one final (possibly short) block */
if( inlen__ > i * BLAKE2S_BLOCKBYTES )
{
const size_t left = inlen__ - i * BLAKE2S_BLOCKBYTES;
const size_t len = left <= BLAKE2S_BLOCKBYTES ? left : BLAKE2S_BLOCKBYTES;
blake2s_update( S[i], in__, len );
}
blake2s_final( S[i], hash[i], BLAKE2S_OUTBYTES );
}
if( blake2sp_init_root( FS, outlen, keylen ) < 0 )
return -1;
FS->last_node = 1;
/* root absorbs the concatenated leaf digests */
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( FS, hash[i], BLAKE2S_OUTBYTES );
return blake2s_final( FS, out, outlen );
}
#if defined(BLAKE2SP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
/* Self-test: check the keyed one-shot API and the streaming API against the
 * known-answer vectors in blake2-kat.h, for every input length and a range
 * of streaming chunk sizes. Prints "ok" and returns 0 on success. */
int main( void )
{
uint8_t key[BLAKE2S_KEYBYTES];
uint8_t buf[BLAKE2_KAT_LENGTH];
size_t i, step;
/* deterministic test fixtures: key and buffer filled with 0,1,2,... */
for( i = 0; i < BLAKE2S_KEYBYTES; ++i )
key[i] = ( uint8_t )i;
for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
buf[i] = ( uint8_t )i;
/* Test simple API */
for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
{
uint8_t hash[BLAKE2S_OUTBYTES];
blake2sp( hash, BLAKE2S_OUTBYTES, buf, i, key, BLAKE2S_KEYBYTES );
if( 0 != memcmp( hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES ) )
{
goto fail;
}
}
/* Test streaming API */
for(step = 1; step < BLAKE2S_BLOCKBYTES; ++step) {
for (i = 0; i < BLAKE2_KAT_LENGTH; ++i) {
uint8_t hash[BLAKE2S_OUTBYTES];
blake2sp_state S;
uint8_t * p = buf;
size_t mlen = i;
int err = 0;
if( (err = blake2sp_init_key(&S, BLAKE2S_OUTBYTES, key, BLAKE2S_KEYBYTES)) < 0 ) {
goto fail;
}
/* feed the message in step-sized chunks, then the remainder */
while (mlen >= step) {
if ( (err = blake2sp_update(&S, p, step)) < 0 ) {
goto fail;
}
mlen -= step;
p += step;
}
if ( (err = blake2sp_update(&S, p, mlen)) < 0) {
goto fail;
}
if ( (err = blake2sp_final(&S, hash, BLAKE2S_OUTBYTES)) < 0) {
goto fail;
}
if (0 != memcmp(hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES)) {
goto fail;
}
}
}
puts( "ok" );
return 0;
fail:
puts("error");
return -1;
}
#endif
|
CGOpenMPRuntime.h | //===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#include "CGValue.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Type.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/AtomicOrdering.h"
namespace llvm {
class ArrayType;
class Constant;
class FunctionType;
class GlobalVariable;
class StructType;
class Type;
class Value;
class OpenMPIRBuilder;
} // namespace llvm
namespace clang {
class Expr;
class OMPDependClause;
class OMPExecutableDirective;
class OMPLoopDirective;
class VarDecl;
class OMPDeclareReductionDecl;
class IdentifierInfo;
namespace CodeGen {
class Address;
class CodeGenFunction;
class CodeGenModule;
/// A basic class for pre|post-action for advanced codegen sequence for OpenMP
/// region.
class PrePostActionTy {
public:
explicit PrePostActionTy() {}
/// Called before the body of an OpenMP region is emitted; default no-op.
virtual void Enter(CodeGenFunction &CGF) {}
/// Called after the body of an OpenMP region is emitted; default no-op.
virtual void Exit(CodeGenFunction &CGF) {}
virtual ~PrePostActionTy() {}
};
/// Class provides a way to call simple version of codegen for OpenMP region, or
/// an advanced with possible pre|post-actions in codegen.
class RegionCodeGenTy final {
/// Type-erased pointer to the wrapped callable (stored as an integer).
intptr_t CodeGen;
typedef void (*CodeGenTy)(intptr_t, CodeGenFunction &, PrePostActionTy &);
/// Trampoline that casts CodeGen back to the callable and invokes it.
CodeGenTy Callback;
/// Optional pre/post action wrapped around the codegen callback;
/// mutable so it can be attached through a const reference.
mutable PrePostActionTy *PrePostAction;
RegionCodeGenTy() = delete;
RegionCodeGenTy &operator=(const RegionCodeGenTy &) = delete;
template <typename Callable>
static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF,
PrePostActionTy &Action) {
return (*reinterpret_cast<Callable *>(CodeGen))(CGF, Action);
}
public:
/// Wrap any callable (CGF, Action) -> void. The enable_if keeps this
/// constructor from hijacking the copy constructor.
/// NOTE(review): stores the address of the referenced callable, so the
/// callable must outlive this object.
template <typename Callable>
RegionCodeGenTy(
Callable &&CodeGen,
std::enable_if_t<!std::is_same<std::remove_reference_t<Callable>,
RegionCodeGenTy>::value> * = nullptr)
: CodeGen(reinterpret_cast<intptr_t>(&CodeGen)),
Callback(CallbackFn<std::remove_reference_t<Callable>>),
PrePostAction(nullptr) {}
/// Attach a pre/post action to run around the callback.
void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; }
/// Invoke the wrapped callback (defined out of line).
void operator()(CodeGenFunction &CGF) const;
};
/// Collected clause data used when emitting code for OpenMP task-based
/// directives.
struct OMPTaskDataTy final {
  // 'private' clause: original variables and their private copies.
  SmallVector<const Expr *, 4> PrivateVars;
  SmallVector<const Expr *, 4> PrivateCopies;
  // 'firstprivate' clause: originals, copies, and initializer expressions.
  SmallVector<const Expr *, 4> FirstprivateVars;
  SmallVector<const Expr *, 4> FirstprivateCopies;
  SmallVector<const Expr *, 4> FirstprivateInits;
  // 'lastprivate' clause: originals and their private copies.
  SmallVector<const Expr *, 4> LastprivateVars;
  SmallVector<const Expr *, 4> LastprivateCopies;
  // 'reduction' clause: items, originals, private copies, and combiner ops.
  SmallVector<const Expr *, 4> ReductionVars;
  SmallVector<const Expr *, 4> ReductionOrigs;
  SmallVector<const Expr *, 4> ReductionCopies;
  SmallVector<const Expr *, 4> ReductionOps;
  // Local variables privatized for the task.
  SmallVector<CanonicalDeclPtr<const VarDecl>, 4> PrivateLocals;
  /// One 'depend' clause entry: dependence kind, optional iterator
  /// expression, and the dependency expressions themselves.
  struct DependData {
    OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
    const Expr *IteratorExpr = nullptr;
    SmallVector<const Expr *, 4> DepExprs;
    explicit DependData() = default;
    DependData(OpenMPDependClauseKind DepKind, const Expr *IteratorExpr)
        : DepKind(DepKind), IteratorExpr(IteratorExpr) {}
  };
  SmallVector<DependData, 4> Dependences;
  // Value/flag pairs for the corresponding clauses.
  // NOTE(review): the bool half presumably records whether the clause was
  // present on the directive — confirm at the use sites.
  llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Priority;
  // Emitted reduction data, if any (null otherwise).
  llvm::Value *Reductions = nullptr;
  unsigned NumberOfParts = 0;
  // Tied task by default; presumably cleared for 'untied' — confirm.
  bool Tied = true;
  bool Nogroup = false;
  bool IsReductionWithTaskMod = false;
  bool IsWorksharingReduction = false;
};
/// Class intended to support codegen of all kind of the reduction clauses.
class ReductionCodeGen {
private:
  /// Data required for codegen of reduction clauses.
  struct ReductionData {
    /// Reference to the item shared between tasks to reduce into.
    const Expr *Shared = nullptr;
    /// Reference to the original item.
    const Expr *Ref = nullptr;
    /// Helper expression for generation of private copy.
    const Expr *Private = nullptr;
    /// Helper expression for generation of the reduction operation.
    const Expr *ReductionOp = nullptr;
    ReductionData(const Expr *Shared, const Expr *Ref, const Expr *Private,
                  const Expr *ReductionOp)
        : Shared(Shared), Ref(Ref), Private(Private), ReductionOp(ReductionOp) {
    }
  };
  /// List of reduction-based clauses.
  SmallVector<ReductionData, 4> ClausesData;
  /// List of addresses of shared variables/expressions.
  SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses;
  /// List of addresses of original variables/expressions.
  SmallVector<std::pair<LValue, LValue>, 4> OrigAddresses;
  /// Sizes of the reduction items in chars.
  SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes;
  /// Base declarations for the reduction items.
  SmallVector<const VarDecl *, 4> BaseDecls;
  /// Emits lvalue for shared expression.
  LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E);
  /// Emits upper bound for shared expression (if array section).
  LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E);
  /// Performs aggregate initialization.
  /// \param N Number of reduction item in the common list.
  /// \param PrivateAddr Address of the corresponding private item.
  /// \param SharedLVal Address of the original shared variable.
  /// \param DRD Declare reduction construct used for reduction item.
  void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N,
                                   Address PrivateAddr, LValue SharedLVal,
                                   const OMPDeclareReductionDecl *DRD);

public:
  /// \param Shareds Expressions for the shared reduction items.
  /// \param Origs Expressions for the original reduction items.
  /// \param Privates Helper expressions for the private copies.
  /// \param ReductionOps Helper expressions for the reduction operations.
  /// NOTE(review): the lists appear to be parallel (indexed together by the
  /// item number N used throughout this class) — confirm in the definition.
  ReductionCodeGen(ArrayRef<const Expr *> Shareds, ArrayRef<const Expr *> Origs,
                   ArrayRef<const Expr *> Privates,
                   ArrayRef<const Expr *> ReductionOps);
  /// Emits lvalue for the shared and original reduction item.
  /// \param N Number of the reduction item.
  void emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variable-modified type, if required.
  /// \param N Number of the reduction item.
  void emitAggregateType(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variable-modified type, if required.
  /// \param N Number of the reduction item.
  /// \param Size Size of the type in chars.
  void emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size);
  /// Performs initialization of the private copy for the reduction item.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  /// \param DefaultInit Default initialization sequence that should be
  /// performed if no reduction specific initialization is found.
  /// \param SharedLVal Address of the original shared variable.
  void
  emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr,
                     LValue SharedLVal,
                     llvm::function_ref<bool(CodeGenFunction &)> DefaultInit);
  /// Returns true if the private copy requires cleanups.
  bool needCleanups(unsigned N);
  /// Emits cleanup code for the reduction item.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr);
  /// Adjusts \p PrivateAddr for using instead of the original variable
  /// address in normal operations.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
                               Address PrivateAddr);
  /// Returns LValue for the reduction item.
  LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; }
  /// Returns LValue for the original reduction item.
  LValue getOrigLValue(unsigned N) const { return OrigAddresses[N].first; }
  /// Returns the size of the reduction item (in chars and total number of
  /// elements in the item), or nullptr, if the size is a constant.
  std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const {
    return Sizes[N];
  }
  /// Returns the base declaration of the reduction item.
  const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; }
  /// Returns the original reference expression of the reduction item.
  const Expr *getRefExpr(unsigned N) const { return ClausesData[N].Ref; }
  /// Returns true if the initialization of the reduction item uses initializer
  /// from declare reduction construct.
  bool usesReductionInitializer(unsigned N) const;
};
class CGOpenMPRuntime {
public:
/// Allows to disable automatic handling of functions used in target regions
/// as those marked as `omp declare target`.
class DisableAutoDeclareTargetRAII {
  CodeGenModule &CGM;
  /// Saved value of the ShouldMarkAsGlobal flag, to be restored on
  /// destruction.
  bool SavedShouldMarkAsGlobal;

public:
  DisableAutoDeclareTargetRAII(CodeGenModule &CGM);
  ~DisableAutoDeclareTargetRAII();
};
/// Manages list of nontemporal decls for the specified directive.
class NontemporalDeclsRAII {
  CodeGenModule &CGM;
  /// Whether the constructor pushed a new entry (and the destructor must
  /// therefore pop it).
  const bool NeedToPush;

public:
  NontemporalDeclsRAII(CodeGenModule &CGM, const OMPLoopDirective &S);
  ~NontemporalDeclsRAII();
};
/// Manages the list of local variables of untied tasks for the specified
/// directive. (The previous comment said "nontemporal decls" — a copy-paste
/// from NontemporalDeclsRAII; this class handles untied-task locals.)
class UntiedTaskLocalDeclsRAII {
  CodeGenModule &CGM;
  /// Whether the constructor pushed a new entry (and the destructor must
  /// therefore pop it).
  const bool NeedToPush;

public:
  /// \param LocalVars Map from local variable declarations to their
  /// addresses for the untied task.
  UntiedTaskLocalDeclsRAII(
      CodeGenModule &CGM,
      const llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, Address>
          &LocalVars);
  ~UntiedTaskLocalDeclsRAII();
};
/// Maps the expression for the lastprivate variable to the global copy used
/// to store new value because original variables are not mapped in inner
/// parallel regions. Only private copies are captured but we need also to
/// store private copy in shared address.
/// Also, stores the expression for the private loop counter and its
/// threadprivate name.
struct LastprivateConditionalData {
  /// Map from each tracked declaration to its unique (threadprivate) name.
  llvm::MapVector<CanonicalDeclPtr<const Decl>, SmallString<16>>
      DeclToUniqueName;
  /// LValue of the loop iteration variable.
  LValue IVLVal;
  /// Function this data was collected for, if any.
  llvm::Function *Fn = nullptr;
  /// True if lastprivate conditional analysis is disabled for this region.
  bool Disabled = false;
};
/// Manages list of lastprivate conditional decls for the specified directive.
class LastprivateConditionalRAII {
  /// What the destructor/constructor pair must do for this directive.
  enum class ActionToDo {
    DoNotPush,
    PushAsLastprivateConditional,
    DisableLastprivateConditional,
  };
  CodeGenModule &CGM;
  ActionToDo Action = ActionToDo::DoNotPush;
  /// Check and try to disable analysis of inner regions for changes in
  /// lastprivate conditional.
  /// \param NeedToAddForLPCsAsDisabled [out] Decls to be recorded as having
  /// the analysis disabled.
  void tryToDisableInnerAnalysis(const OMPExecutableDirective &S,
                                 llvm::DenseSet<CanonicalDeclPtr<const Decl>>
                                     &NeedToAddForLPCsAsDisabled) const;
  /// Private constructor used by disable().
  LastprivateConditionalRAII(CodeGenFunction &CGF,
                             const OMPExecutableDirective &S);

public:
  explicit LastprivateConditionalRAII(CodeGenFunction &CGF,
                                      const OMPExecutableDirective &S,
                                      LValue IVLVal);
  /// Creates an RAII object that disables lastprivate conditional handling
  /// for the given directive.
  static LastprivateConditionalRAII disable(CodeGenFunction &CGF,
                                            const OMPExecutableDirective &S);
  ~LastprivateConditionalRAII();
};
llvm::OpenMPIRBuilder &getOMPBuilder() { return OMPBuilder; }
protected:
CodeGenModule &CGM;
StringRef FirstSeparator, Separator;
/// Constructor allowing to redefine the name separator for the variables.
explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
StringRef Separator);
/// Creates offloading entry for the provided entry ID \a ID,
/// address \a Addr, size \a Size, and flags \a Flags.
virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
uint64_t Size, int32_t Flags,
llvm::GlobalValue::LinkageTypes Linkage);
/// Helper to emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Lambda codegen specific to an accelerator device.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
/// Emits object of ident_t type with info for source location.
/// \param Flags Flags for OpenMP location.
///
llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
unsigned Flags = 0);
/// Returns pointer to ident_t type.
llvm::Type *getIdentTyPointerTy();
/// Gets thread id value for the current thread.
///
llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc);
/// Get the function name of an outlined region.
// The name can be customized depending on the target.
//
virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; }
/// Emits \p Callee function call with arguments \p Args with location \p Loc.
void emitCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee Callee,
ArrayRef<llvm::Value *> Args = llvm::None) const;
/// Emits address of the word in a memory where current thread id is
/// stored.
virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc);
void setLocThreadIdInsertPt(CodeGenFunction &CGF,
bool AtCurrentPoint = false);
void clearLocThreadIdInsertPt(CodeGenFunction &CGF);
/// Check if the default location must be constant.
/// Default is false to support OMPT/OMPD.
virtual bool isDefaultLocationConstant() const { return false; }
/// Returns additional flags that can be stored in reserved_2 field of the
/// default location.
virtual unsigned getDefaultLocationReserved2Flags() const { return 0; }
/// Returns default flags for the barriers depending on the directive, for
/// which this barrier is going to be emitted.
static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind);
/// Get the LLVM type for the critical name.
llvm::ArrayType *getKmpCriticalNameTy() const {return KmpCriticalNameTy;}
/// Returns corresponding lock object for the specified critical region
/// name. If the lock object does not exist it is created, otherwise the
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
llvm::Value *getCriticalRegionLock(StringRef CriticalName);
private:
/// An OpenMP-IR-Builder instance.
llvm::OpenMPIRBuilder OMPBuilder;
/// Map for SourceLocation and OpenMP runtime library debug locations.
typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDebugLocMapTy;
OpenMPDebugLocMapTy OpenMPDebugLocMap;
/// The type for a microtask which gets passed to __kmpc_fork_call().
/// Original representation is:
/// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...);
llvm::FunctionType *Kmpc_MicroTy = nullptr;
/// Stores debug location and ThreadID for the function.
struct DebugLocThreadIdTy {
  /// Cached debug location value for the function.
  llvm::Value *DebugLoc;
  /// Cached thread id value for the function.
  llvm::Value *ThreadID;
  /// Insert point for the service instructions.
  llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr;
};
/// Map of local debug location, ThreadId and functions.
typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy>
OpenMPLocThreadIDMapTy;
OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap;
/// Map of UDRs and corresponding combiner/initializer.
typedef llvm::DenseMap<const OMPDeclareReductionDecl *,
std::pair<llvm::Function *, llvm::Function *>>
UDRMapTy;
UDRMapTy UDRMap;
/// Map of functions and locally defined UDRs.
typedef llvm::DenseMap<llvm::Function *,
SmallVector<const OMPDeclareReductionDecl *, 4>>
FunctionUDRMapTy;
FunctionUDRMapTy FunctionUDRMap;
/// Map from the user-defined mapper declaration to its corresponding
/// functions.
llvm::DenseMap<const OMPDeclareMapperDecl *, llvm::Function *> UDMMap;
/// Map of functions and their local user-defined mappers.
using FunctionUDMMapTy =
llvm::DenseMap<llvm::Function *,
SmallVector<const OMPDeclareMapperDecl *, 4>>;
FunctionUDMMapTy FunctionUDMMap;
/// Maps local variables marked as lastprivate conditional to their internal
/// types.
llvm::DenseMap<llvm::Function *,
llvm::DenseMap<CanonicalDeclPtr<const Decl>,
std::tuple<QualType, const FieldDecl *,
const FieldDecl *, LValue>>>
LastprivateConditionalToTypes;
/// Type kmp_critical_name, originally defined as typedef kmp_int32
/// kmp_critical_name[8];
llvm::ArrayType *KmpCriticalNameTy;
/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
/// variables.
llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator>
InternalVars;
/// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);
llvm::Type *KmpRoutineEntryPtrTy = nullptr;
QualType KmpRoutineEntryPtrQTy;
/// Type typedef struct kmp_task {
/// void * shareds; /**< pointer to block of pointers to
/// shared vars */
/// kmp_routine_entry_t routine; /**< pointer to routine to call for
/// executing task */
/// kmp_int32 part_id; /**< part id for the task */
/// kmp_routine_entry_t destructors; /* pointer to function to invoke
/// deconstructors of firstprivate C++ objects */
/// } kmp_task_t;
QualType KmpTaskTQTy;
/// Saved kmp_task_t for task directive.
QualType SavedKmpTaskTQTy;
/// Saved kmp_task_t for taskloop-based directive.
QualType SavedKmpTaskloopTQTy;
/// Type typedef struct kmp_depend_info {
/// kmp_intptr_t base_addr;
/// size_t len;
/// struct {
/// bool in:1;
/// bool out:1;
/// } flags;
/// } kmp_depend_info_t;
QualType KmpDependInfoTy;
/// Type typedef struct kmp_task_affinity_info {
/// kmp_intptr_t base_addr;
/// size_t len;
/// struct {
/// bool flag1 : 1;
/// bool flag2 : 1;
/// kmp_int32 reserved : 30;
/// } flags;
/// } kmp_task_affinity_info_t;
QualType KmpTaskAffinityInfoTy;
/// struct kmp_dim { // loop bounds info casted to kmp_int64
/// kmp_int64 lo; // lower
/// kmp_int64 up; // upper
/// kmp_int64 st; // stride
/// };
QualType KmpDimTy;
/// Type struct __tgt_offload_entry{
/// void *addr; // Pointer to the offload entry info.
/// // (function or global)
/// char *name; // Name of the function or global.
/// size_t size; // Size of the entry info (0 if it a function).
/// int32_t flags;
/// int32_t reserved;
/// };
QualType TgtOffloadEntryQTy;
/// Entity that registers the offloading constants that were emitted so
/// far.
class OffloadEntriesInfoManagerTy {
  CodeGenModule &CGM;
  /// Number of entries registered so far.
  unsigned OffloadingEntriesNum = 0;

public:
  /// Base class of the entries info.
  class OffloadEntryInfo {
  public:
    /// Kind of a given entry.
    enum OffloadingEntryInfoKinds : unsigned {
      /// Entry is a target region.
      OffloadingEntryInfoTargetRegion = 0,
      /// Entry is a declare target variable.
      OffloadingEntryInfoDeviceGlobalVar = 1,
      /// Invalid entry info.
      OffloadingEntryInfoInvalid = ~0u
    };

  protected:
    OffloadEntryInfo() = delete;
    explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {}
    explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order,
                              uint32_t Flags)
        : Flags(Flags), Order(Order), Kind(Kind) {}
    ~OffloadEntryInfo() = default;

  public:
    /// Returns true if the entry has been assigned an emission order.
    bool isValid() const { return Order != ~0u; }
    unsigned getOrder() const { return Order; }
    OffloadingEntryInfoKinds getKind() const { return Kind; }
    uint32_t getFlags() const { return Flags; }
    void setFlags(uint32_t NewFlags) { Flags = NewFlags; }
    llvm::Constant *getAddress() const {
      return cast_or_null<llvm::Constant>(Addr);
    }
    /// Sets the entry address; asserts it was not set before.
    void setAddress(llvm::Constant *V) {
      assert(!Addr.pointsToAliveValue() && "Address has been set before!");
      Addr = V;
    }
    static bool classof(const OffloadEntryInfo *Info) { return true; }

  private:
    /// Address of the entity that has to be mapped for offloading.
    llvm::WeakTrackingVH Addr;
    /// Flags associated with the device global.
    uint32_t Flags = 0u;
    /// Order this entry was emitted.
    unsigned Order = ~0u;
    /// Entry kind; invalid until set by a derived-class constructor.
    OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid;
  };

  /// Return true if there are no entries defined.
  bool empty() const;
  /// Return number of entries defined so far.
  unsigned size() const { return OffloadingEntriesNum; }
  OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {}

  //
  // Target region entries related.
  //

  /// Kind of the target registry entry.
  enum OMPTargetRegionEntryKind : uint32_t {
    /// Mark the entry as target region.
    OMPTargetRegionEntryTargetRegion = 0x0,
    /// Mark the entry as a global constructor.
    OMPTargetRegionEntryCtor = 0x02,
    /// Mark the entry as a global destructor.
    OMPTargetRegionEntryDtor = 0x04,
  };

  /// Target region entries info.
  class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo {
    /// Address that can be used as the ID of the entry.
    llvm::Constant *ID = nullptr;

  public:
    OffloadEntryInfoTargetRegion()
        : OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {}
    explicit OffloadEntryInfoTargetRegion(unsigned Order,
                                          llvm::Constant *Addr,
                                          llvm::Constant *ID,
                                          OMPTargetRegionEntryKind Flags)
        : OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags),
          ID(ID) {
      setAddress(Addr);
    }
    llvm::Constant *getID() const { return ID; }
    /// Sets the entry ID; asserts it was not set before.
    void setID(llvm::Constant *V) {
      assert(!ID && "ID has been set before!");
      ID = V;
    }
    static bool classof(const OffloadEntryInfo *Info) {
      return Info->getKind() == OffloadingEntryInfoTargetRegion;
    }
  };

  /// Initialize target region entry.
  void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                       StringRef ParentName, unsigned LineNum,
                                       unsigned Order);
  /// Register target region entry.
  void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                     StringRef ParentName, unsigned LineNum,
                                     llvm::Constant *Addr, llvm::Constant *ID,
                                     OMPTargetRegionEntryKind Flags);
  /// Return true if a target region entry with the provided information
  /// exists.
  bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                StringRef ParentName, unsigned LineNum) const;
  /// Applies action \a Action on all registered entries.
  typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned,
                                  const OffloadEntryInfoTargetRegion &)>
      OffloadTargetRegionEntryInfoActTy;
  void actOnTargetRegionEntriesInfo(
      const OffloadTargetRegionEntryInfoActTy &Action);

  //
  // Device global variable entries related.
  //

  /// Kind of the global variable entry.
  enum OMPTargetGlobalVarEntryKind : uint32_t {
    /// Mark the entry as a to declare target.
    OMPTargetGlobalVarEntryTo = 0x0,
    /// Mark the entry as a to declare target link.
    OMPTargetGlobalVarEntryLink = 0x1,
  };

  /// Device global variable entries info.
  class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo {
    /// Type of the global variable.
    CharUnits VarSize;
    llvm::GlobalValue::LinkageTypes Linkage;

  public:
    OffloadEntryInfoDeviceGlobalVar()
        : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {}
    explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order,
                                             OMPTargetGlobalVarEntryKind Flags)
        : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {}
    explicit OffloadEntryInfoDeviceGlobalVar(
        unsigned Order, llvm::Constant *Addr, CharUnits VarSize,
        OMPTargetGlobalVarEntryKind Flags,
        llvm::GlobalValue::LinkageTypes Linkage)
        : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags),
          VarSize(VarSize), Linkage(Linkage) {
      setAddress(Addr);
    }
    CharUnits getVarSize() const { return VarSize; }
    void setVarSize(CharUnits Size) { VarSize = Size; }
    llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; }
    void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; }
    static bool classof(const OffloadEntryInfo *Info) {
      return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar;
    }
  };

  /// Initialize device global variable entry.
  void initializeDeviceGlobalVarEntryInfo(StringRef Name,
                                          OMPTargetGlobalVarEntryKind Flags,
                                          unsigned Order);
  /// Register device global variable entry.
  void
  registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
                                   CharUnits VarSize,
                                   OMPTargetGlobalVarEntryKind Flags,
                                   llvm::GlobalValue::LinkageTypes Linkage);
  /// Checks if the variable with the given name has been registered already.
  bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const {
    return OffloadEntriesDeviceGlobalVar.count(VarName) > 0;
  }
  /// Applies action \a Action on all registered entries.
  typedef llvm::function_ref<void(StringRef,
                                  const OffloadEntryInfoDeviceGlobalVar &)>
      OffloadDeviceGlobalVarEntryInfoActTy;
  void actOnDeviceGlobalVarEntriesInfo(
      const OffloadDeviceGlobalVarEntryInfoActTy &Action);

private:
  // Storage for target region entries kind. The storage is to be indexed by
  // file ID, device ID, parent function name and line number.
  typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion>
      OffloadEntriesTargetRegionPerLine;
  typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine>
      OffloadEntriesTargetRegionPerParentName;
  typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName>
      OffloadEntriesTargetRegionPerFile;
  typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile>
      OffloadEntriesTargetRegionPerDevice;
  typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy;
  OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
  /// Storage for device global variable entries kind. The storage is to be
  /// indexed by mangled name.
  typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar>
      OffloadEntriesDeviceGlobalVarTy;
  OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar;
};
OffloadEntriesInfoManagerTy OffloadEntriesInfoManager;
bool ShouldMarkAsGlobal = true;
/// List of the emitted declarations.
llvm::DenseSet<CanonicalDeclPtr<const Decl>> AlreadyEmittedTargetDecls;
/// List of the global variables with their addresses that should not be
/// emitted for the target.
llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables;
/// List of variables that can become declare target implicitly and, thus,
/// must be emitted.
llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables;
using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>;
/// Stack for list of declarations in current context marked as nontemporal.
/// The set is the union of all current stack elements.
llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack;
using UntiedLocalVarsAddressesMap =
llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, Address>;
llvm::SmallVector<UntiedLocalVarsAddressesMap, 4> UntiedLocalVarsStack;
/// Stack for list of addresses of declarations in current context marked as
/// lastprivate conditional. The set is the union of all current stack
/// elements.
llvm::SmallVector<LastprivateConditionalData, 4> LastprivateConditionalStack;
/// Flag for keeping track of whether a requires unified_shared_memory
/// directive is present.
bool HasRequiresUnifiedSharedMemory = false;
/// Atomic ordering from the omp requires directive.
llvm::AtomicOrdering RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;
/// Flag for keeping track of whether a target region has been emitted.
bool HasEmittedTargetRegion = false;
/// Flag for keeping track of whether a device routine has been emitted.
/// Device routines are specific to the target device.
bool HasEmittedDeclareTargetRegion = false;
/// Loads all the offload entries information from the host IR
/// metadata.
void loadOffloadInfoMetadata();
/// Returns __tgt_offload_entry type.
QualType getTgtOffloadEntryQTy();
/// Start scanning from statement \a S and emit all target regions
/// found along the way.
/// \param S Starting statement.
/// \param ParentName Name of the function declaration that is being scanned.
void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName);
/// Build type kmp_routine_entry_t (if not built yet).
void emitKmpRoutineEntryT(QualType KmpInt32Ty);
/// Returns pointer to kmpc_micro type.
llvm::Type *getKmpc_MicroPointerTy();
/// Returns __kmpc_for_static_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_next_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_fini_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize,
bool IVSigned);
/// If the specified mangled name is not in the module, create and
/// return threadprivate cache object. This object is a pointer's worth of
/// storage that's reserved for use by the OpenMP runtime.
/// \param VD Threadprivate variable.
/// \return Cache variable for the specified threadprivate.
llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);
/// Gets (if variable with the given name already exist) or creates
/// internal global variable with the specified Name. The created variable has
/// linkage CommonLinkage by default and is initialized by null value.
/// \param Ty Type of the global variable. If it is exist already the type
/// must be the same.
/// \param Name Name of the variable.
llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty,
const llvm::Twine &Name,
unsigned AddressSpace = 0);
/// Set of threadprivate variables with the generated initializer.
llvm::StringSet<> ThreadPrivateWithDefinition;
/// Set of declare target variables with the generated initializer.
llvm::StringSet<> DeclareTargetWithDefinition;
/// Emits initialization code for the threadprivate variables.
/// \param VDAddr Address of the global variable \a VD.
/// \param Ctor Pointer to a global init function for \a VD.
/// \param CopyCtor Pointer to a global copy function for \a VD.
/// \param Dtor Pointer to a global destructor function for \a VD.
/// \param Loc Location of threadprivate declaration.
void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr,
llvm::Value *Ctor, llvm::Value *CopyCtor,
llvm::Value *Dtor, SourceLocation Loc);
/// Emit the array initialization or deletion portion for user-defined mapper
/// code generation.
void emitUDMapperArrayInitOrDel(CodeGenFunction &MapperCGF,
llvm::Value *Handle, llvm::Value *BasePtr,
llvm::Value *Ptr, llvm::Value *Size,
llvm::Value *MapType, CharUnits ElementSize,
llvm::BasicBlock *ExitBB, bool IsInit);
/// Aggregated results of emitting task-allocation code; returned by
/// emitTaskInit().
struct TaskResultTy {
  /// Result of the task-allocation runtime call (null until emitted).
  llvm::Value *NewTask = nullptr;
  /// Outlined task entry function invoked by the runtime.
  llvm::Function *TaskEntry = nullptr;
  /// The new task value converted to the kmp_task_t representation
  /// (presumably a cast of NewTask — confirm in the definition).
  llvm::Value *NewTaskNewTaskTTy = nullptr;
  /// Base LValue of the emitted kmp_task_t object.
  LValue TDBase;
  /// Record declaration for the kmp_task_t type.
  const RecordDecl *KmpTaskTQTyRD = nullptr;
  /// Task duplication function, if any.
  llvm::Value *TaskDupFn = nullptr;
};
/// Emit task region for the task directive. The task region is emitted in
/// several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const OMPTaskDataTy &Data);
/// Returns default address space for the constant firstprivates, 0 by
/// default.
virtual unsigned getDefaultFirstprivateAddressSpace() const { return 0; }
/// Emit code that pushes the trip count of loops associated with constructs
/// 'target teams distribute' and 'teams distribute parallel for'.
/// \param SizeEmitter Emits the int64 value for the number of iterations of
/// the associated loop.
void emitTargetNumIterationsCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Value *DeviceID,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter);
/// Emit update for lastprivate conditional data.
void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal,
StringRef UniqueDeclName, LValue LVal,
SourceLocation Loc);
/// Returns the number of the elements and the address of the depobj
/// dependency array.
/// \return Number of elements in depobj array and the pointer to the array of
/// dependencies.
std::pair<llvm::Value *, LValue> getDepobjElements(CodeGenFunction &CGF,
LValue DepobjLVal,
SourceLocation Loc);
public:
explicit CGOpenMPRuntime(CodeGenModule &CGM)
: CGOpenMPRuntime(CGM, ".", ".") {}
virtual ~CGOpenMPRuntime() {}
virtual void clear();
/// Emits code for OpenMP 'if' clause using specified \a CodeGen
/// function. Here is the logic:
/// if (Cond) {
/// ThenGen();
/// } else {
/// ElseGen();
/// }
void emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
const RegionCodeGenTy &ThenGen,
const RegionCodeGenTy &ElseGen);
/// Checks if the \p Body is the \a CompoundStmt and returns its child
/// statement iff there is only one that is not evaluatable at the compile
/// time.
static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body);
/// Get the platform-specific name separator.
std::string getName(ArrayRef<StringRef> Parts) const;
/// Emit code for the specified user defined reduction construct.
virtual void emitUserDefinedReduction(CodeGenFunction *CGF,
const OMPDeclareReductionDecl *D);
/// Get combiner/initializer for the specified user-defined reduction, if any.
virtual std::pair<llvm::Function *, llvm::Function *>
getUserDefinedReduction(const OMPDeclareReductionDecl *D);
/// Emit the function for the user defined mapper construct.
void emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
CodeGenFunction *CGF = nullptr);
/// Get the function for the specified user-defined mapper. If it does not
/// exist, create one.
llvm::Function *
getOrCreateUserDefinedMapperFunc(const OMPDeclareMapperDecl *D);
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitParallelOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitTeamsOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied
/// task region.
/// \param TaskTVar Variable for task_t argument.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param Tied true if task is generated for tied task, false otherwise.
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
virtual llvm::Function *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts);
/// Cleans up references to the objects in finished function.
///
virtual void functionFinished(CodeGenFunction &CGF);
/// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond);
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
/// \param Hint Value of the 'hint' clause (optional).
virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc,
const Expr *Hint = nullptr);
/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
virtual void emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc);
/// Emits code for a taskyield directive.
virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc);
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
virtual void emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc);
/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
virtual void emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen,
SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps);
/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
virtual void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc, bool IsThreads);
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if need to emit checks for cancellation barriers.
/// \param ForceSimpleCall true simple barrier call must be emitted, false if
/// runtime class decides which one to emit (simple or with cancellation
/// checks).
///
virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind,
bool EmitChecks = true,
bool ForceSimpleCall = false);
/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of distribute directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static chunked.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static non-chunked.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is dynamic.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule Kind specified in the 'schedule' clause.
///
virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const;
/// Struct with the values to be passed to the dispatch runtime function
/// (see emitForDispatchInit).
struct DispatchRTInput {
/// Loop lower bound.
llvm::Value *LB = nullptr;
/// Loop upper bound.
llvm::Value *UB = nullptr;
/// Chunk size specified using 'schedule' clause (nullptr if chunk
/// was not specified).
llvm::Value *Chunk = nullptr;
/// Default constructor: all bounds and the chunk remain nullptr.
DispatchRTInput() = default;
/// Construct with explicit loop bounds and chunk expression.
/// \param LB Loop lower bound.
/// \param UB Loop upper bound.
/// \param Chunk Chunk size, or nullptr if no chunk was specified.
DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk)
: LB(LB), UB(UB), Chunk(Chunk) {}
};
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
/// This is used for non static scheduled types and when the ordered
/// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
/// For the default (nullptr) value, the chunk 1 will be used.
///
virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind,
unsigned IVSize, bool IVSigned, bool Ordered,
const DispatchRTInput &DispatchValues);
/// Struct with the values to be passed to the static runtime function
/// (see emitForStaticInit / emitDistributeStaticInit).
struct StaticRTInput {
/// Size of the iteration variable in bits.
unsigned IVSize = 0;
/// Sign of the iteration variable.
bool IVSigned = false;
/// true if loop is ordered, false otherwise.
bool Ordered = false;
/// Address of the output variable in which the flag of the last iteration
/// is returned.
Address IL = Address::invalid();
/// Address of the output variable in which the lower iteration number is
/// returned.
Address LB = Address::invalid();
/// Address of the output variable in which the upper iteration number is
/// returned.
Address UB = Address::invalid();
/// Address of the output variable in which the stride value is returned,
/// necessary to generate the static_chunked scheduled loop.
Address ST = Address::invalid();
/// Value of the chunk for the static_chunked scheduled loop. For the
/// default (nullptr) value, the chunk 1 will be used.
llvm::Value *Chunk = nullptr;
/// Construct the full set of inputs for a static loop initialization.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if the loop is ordered.
/// \param IL Output address for the last-iteration flag.
/// \param LB Output address for the lower bound.
/// \param UB Output address for the upper bound.
/// \param ST Output address for the stride.
/// \param Chunk Chunk value, or nullptr to use the default chunk of 1.
StaticRTInput(unsigned IVSize, bool IVSigned, bool Ordered, Address IL,
Address LB, Address UB, Address ST,
llvm::Value *Chunk = nullptr)
: IVSize(IVSize), IVSigned(IVSigned), Ordered(Ordered), IL(IL), LB(LB),
UB(UB), ST(ST), Chunk(Chunk) {}
};
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
///
/// This is used only in case of static schedule, when the user did not
/// specify a ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind,
const StaticRTInput &Values);
/// Call the appropriate runtime routine to initialize it before start
/// of the distribute loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
virtual void emitDistributeStaticInit(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDistScheduleClauseKind SchedKind,
const StaticRTInput &Values);
/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF,
SourceLocation Loc, unsigned IVSize,
bool IVSigned);
/// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind);
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned,
Address IL, Address LB,
Address UB, Address ST);
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
virtual void emitNumThreadsClause(CodeGenFunction &CGF,
llvm::Value *NumThreads,
SourceLocation Loc);
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
virtual void emitProcBindClause(CodeGenFunction &CGF,
llvm::omp::ProcBindKind ProcBind,
SourceLocation Loc);
/// Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
const VarDecl *VD,
Address VDAddr,
SourceLocation Loc);
/// Returns the address of the variable marked as declare target with link
/// clause OR as declare target with to clause and unified memory.
virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD);
/// Emit a code for initialization of threadprivate variable. It emits
/// a call to runtime library which adds initial value to the newly created
/// threadprivate variable (if it is not constant) and registers destructor
/// for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
virtual llvm::Function *
emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr);
/// Emit a code for initialization of declare target variable.
/// \param VD Declare target variable.
/// \param Addr Address of the global variable \a VD.
/// \param PerformInit true if initialization expression is not constant.
virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD,
llvm::GlobalVariable *Addr,
bool PerformInit);
/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
QualType VarType,
StringRef Name);
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
SourceLocation Loc, llvm::AtomicOrdering AO);
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data);
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
/// is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPLoopDirective &D,
llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds,
const Expr *IfCond, const OMPTaskDataTy &Data);
/// Emit code for the directive that does not require outlining.
///
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param HasCancel true if region has inner cancel directive, false
/// otherwise.
virtual void emitInlinedDirective(CodeGenFunction &CGF,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen,
bool HasCancel = false);
/// Emits reduction function.
/// \param ArgsType Array type containing pointers to reduction variables.
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
llvm::Function *emitReductionFunction(SourceLocation Loc,
llvm::Type *ArgsType,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps);
/// Emits single reduction combiner
void emitSingleReductionCombiner(CodeGenFunction &CGF,
const Expr *ReductionOp,
const Expr *PrivateRef,
const DeclRefExpr *LHS,
const DeclRefExpr *RHS);
struct ReductionOptionsTy {
bool WithNowait;
bool SimpleReduction;
OpenMPDirectiveKind ReductionKind;
};
/// Emit a code for reduction clause. Next code should be emitted for
/// reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if parent directive has also nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
ReductionOptionsTy Options);
/// Emit a code for initialization of task reduction clause. Next code
/// should be emitted for reduction:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
/// \endcode
/// For reduction clause with task modifier it emits the next call:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
/// red_data);
/// \endcode
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
virtual llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF,
SourceLocation Loc,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data);
/// Emits the following code for reduction clause with task modifier:
/// \code
/// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
/// \endcode
virtual void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
bool IsWorksharingReduction);
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions.
/// \param RCG Allows to reuse an existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N);
/// Get the address of `void *` type of the private copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
virtual Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *ReductionsPtr,
LValue SharedLVal);
/// Emit code for 'taskwait' directive.
virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
virtual void emitCancellationPointCall(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDirectiveKind CancelRegion);
/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
const Expr *IfCond,
OpenMPDirectiveKind CancelRegion);
/// Emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Code generation sequence for the \a D directive.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
/// Emit the target offloading code associated with \a D. The emitted
/// code attempts offloading the execution to the device; in the event of
/// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
/// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used and device modifier.
/// \param SizeEmitter Callback to emit number of iterations for loop-based
/// directives.
virtual void emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter);
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
/// \a GD was dealt with successfully.
/// \param GD Function to scan.
virtual bool emitTargetFunctions(GlobalDecl GD);
/// Emit the global variable if it is a valid device global variable.
/// Returns true if \a GD was dealt with successfully.
/// \param GD Variable declaration to emit.
virtual bool emitTargetGlobalVariable(GlobalDecl GD);
/// Checks if the provided global decl \a GD is a declare target variable and
/// registers it when emitting code for the host.
virtual void registerTargetGlobalVariable(const VarDecl *VD,
llvm::Constant *Addr);
/// Registers provided target firstprivate variable as global on the
/// target.
llvm::Constant *registerTargetFirstprivateCopy(CodeGenFunction &CGF,
const VarDecl *VD);
/// Emit the global \a GD if it is meaningful for the target. Returns
/// if it was emitted successfully.
/// \param GD Global to scan.
virtual bool emitTargetGlobal(GlobalDecl GD);
/// Creates and returns a registration function for when at least one
/// requires directives was used in the current module.
llvm::Function *emitRequiresDirectiveRegFun();
/// Creates all the offload entries in the current compilation unit
/// along with the associated metadata.
void createOffloadEntriesAndInfoMetadata();
/// Emits code for teams call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
///
virtual void emitTeamsCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars);
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
/// \param ThreadLimit An integer expression of threads.
virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc);
/// Bundle of state that must be preserved for the whole lifetime of a
/// 'target data' region (from region entry until it is closed).
class TargetDataInfo {
  /// Whether device pointer information must be collected for captures.
  bool RequiresDevicePointerInfo = false;
  /// Whether Clang emits separate runtime calls for the beginning and end
  /// of the region; such calls may use distinct map-type arrays.
  bool SeparateBeginEndCalls = false;

public:
  /// Array of base pointers passed to the runtime library.
  llvm::Value *BasePointersArray = nullptr;
  /// Array of section pointers passed to the runtime library.
  llvm::Value *PointersArray = nullptr;
  /// Array of sizes passed to the runtime library.
  llvm::Value *SizesArray = nullptr;
  /// Array of map types passed to the runtime library for the beginning of
  /// the region, or for the whole region when the end has no separate map
  /// types.
  llvm::Value *MapTypesArray = nullptr;
  /// Array of map types passed to the runtime library for the end of the
  /// region; nullptr when the end has no separate map types.
  llvm::Value *MapTypesArrayEnd = nullptr;
  /// Array of user-defined mappers passed to the runtime library.
  llvm::Value *MappersArray = nullptr;
  /// True when at least one user-defined mapper exists.
  bool HasMapper = false;
  /// Total number of pointers passed to the runtime library.
  unsigned NumberOfPtrs = 0u;
  /// Maps a captured declaration to the base-pointer address where the
  /// runtime returns the corresponding device pointer.
  llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap;

  /// Default-construct with both flags off.
  explicit TargetDataInfo() {}
  /// Construct with explicit settings for the two region flags.
  explicit TargetDataInfo(bool RequiresDevicePointerInfo,
                          bool SeparateBeginEndCalls)
      : RequiresDevicePointerInfo(RequiresDevicePointerInfo),
        SeparateBeginEndCalls(SeparateBeginEndCalls) {}

  /// Reset all information about the data arrays to the empty state.
  void clearArrayInfo() {
    // Pointer arrays first, then the scalar bookkeeping.
    BasePointersArray = nullptr;
    PointersArray = nullptr;
    SizesArray = nullptr;
    MapTypesArray = nullptr;
    MapTypesArrayEnd = nullptr;
    MappersArray = nullptr;
    NumberOfPtrs = 0u;
    HasMapper = false;
  }
  /// Return true if the current target data information has valid arrays:
  /// all mandatory arrays are set, the mappers array is set whenever a
  /// user-defined mapper exists, and at least one pointer is recorded.
  bool isValid() {
    if (!BasePointersArray || !PointersArray || !SizesArray || !MapTypesArray)
      return false;
    if (HasMapper && !MappersArray)
      return false;
    return NumberOfPtrs != 0u;
  }
  /// Whether device pointer information must be obtained for captures.
  bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
  /// Whether separate begin/end runtime calls are emitted for the region.
  bool separateBeginEndCalls() { return SeparateBeginEndCalls; }
};
/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
/// target directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
virtual void emitTargetDataCalls(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond, const Expr *Device,
const RegionCodeGenTy &CodeGen,
TargetDataInfo &Info);
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond,
const Expr *Device);
/// Marks function \a Fn with properly mangled versions of vector functions.
/// \param FD Function marked as 'declare simd'.
/// \param Fn LLVM function that must be marked with 'declare simd'
/// attributes.
virtual void emitDeclareSimdFunction(const FunctionDecl *FD,
llvm::Function *Fn);
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations);
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
virtual void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C);
/// Translates the native parameter of outlined function if this is required
/// for target. The default implementation performs no translation.
/// \param FD Field decl from captured record for the parameter.
/// \param NativeParam Parameter itself.
virtual const VarDecl *translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const {
return NativeParam;
}
/// Gets the address of the native argument based on the address of the
/// target-specific parameter.
/// \param NativeParam Parameter itself.
/// \param TargetParam Corresponding target-specific parameter.
virtual Address getParameterAddress(CodeGenFunction &CGF,
const VarDecl *NativeParam,
const VarDecl *TargetParam) const;
/// Choose default schedule type and chunk value for the
/// dist_schedule clause. Does nothing by default; target-specific runtimes
/// may override.
virtual void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF,
const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind,
llvm::Value *&Chunk) const {}
/// Choose default schedule type and chunk value for the
/// schedule clause.
virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF,
const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind,
const Expr *&ChunkExpr) const;
/// Emits call of the outlined function with the provided arguments,
/// translating these arguments to correct target-specific arguments.
virtual void
emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee OutlinedFn,
ArrayRef<llvm::Value *> Args = llvm::None) const;
/// Emits OpenMP-specific function prolog.
/// Required for device constructs.
virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D);
/// Gets the OpenMP-specific address of the local variable.
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD);
/// Marks the declaration as already emitted for the device code and returns
/// true, if it was marked already, and false, otherwise.
bool markAsGlobalTarget(GlobalDecl GD);
/// Emit deferred declare target variables marked for deferred emission.
void emitDeferredTargetDecls() const;
/// Adjust some parameters for the target-based directives, like addresses of
/// the variables captured by reference in lambdas.
virtual void
adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF,
const OMPExecutableDirective &D) const;
/// Perform check on requires decl to ensure that target architecture
/// supports unified addressing.
virtual void processRequiresDirective(const OMPRequiresDecl *D);
/// Gets default memory ordering as specified in requires directive.
llvm::AtomicOrdering getDefaultMemoryOrdering() const;
/// Checks if the variable has associated OMPAllocateDeclAttr attribute with
/// the predefined allocator and translates it into the corresponding address
/// space.
virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS);
/// Return whether the unified_shared_memory has been specified.
bool hasRequiresUnifiedSharedMemory() const;
/// Checks if the \p VD variable is marked as nontemporal declaration in
/// current context.
bool isNontemporalDecl(const ValueDecl *VD) const;
/// Create specialized alloca to handle lastprivate conditionals.
Address emitLastprivateConditionalInit(CodeGenFunction &CGF,
const VarDecl *VD);
/// Checks if the provided \p LVal is lastprivate conditional and emits the
/// code to update the value of the original variable.
/// \code
/// lastprivate(conditional: a)
/// ...
/// <type> a;
/// lp_a = ...;
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
/// last_iv_a = iv;
/// global_a = lp_a;
/// }
/// \endcode
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
const Expr *LHS);
/// Checks if the lastprivate conditional was updated in inner region and
/// writes the value.
/// \code
/// lastprivate(conditional: a)
/// ...
/// <type> a;bool Fired = false;
/// #pragma omp ... shared(a)
/// {
/// lp_a = ...;
/// Fired = true;
/// }
/// if (Fired) {
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
/// last_iv_a = iv;
/// global_a = lp_a;
/// }
/// Fired = false;
/// }
/// \endcode
virtual void checkAndEmitSharedLastprivateConditional(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls);
/// Gets the address of the global copy used for lastprivate conditional
/// update, if any.
/// \param PrivLVal LValue for the private copy.
/// \param VD Original lastprivate declaration.
virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF,
LValue PrivLVal,
const VarDecl *VD,
SourceLocation Loc);
/// Emits list of dependencies based on the provided data (array of
/// dependence/expression pairs).
/// \returns Pointer to the first element of the array casted to VoidPtr type.
std::pair<llvm::Value *, Address>
emitDependClause(CodeGenFunction &CGF,
ArrayRef<OMPTaskDataTy::DependData> Dependencies,
SourceLocation Loc);
/// Emits list of dependencies based on the provided data (array of
/// dependence/expression pairs) for depobj construct. In this case, the
/// variable is allocated dynamically. \returns Pointer to the first
/// element of the array casted to VoidPtr type.
Address emitDepobjDependClause(CodeGenFunction &CGF,
const OMPTaskDataTy::DependData &Dependencies,
SourceLocation Loc);
/// Emits the code to destroy the dependency object provided in depobj
/// directive.
void emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
SourceLocation Loc);
/// Updates the dependency kind in the specified depobj object.
/// \param DepobjLVal LValue for the main depobj object.
/// \param NewDepKind New dependency kind.
void emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
OpenMPDependClauseKind NewDepKind, SourceLocation Loc);
/// Initializes user defined allocators specified in the uses_allocators
/// clauses.
void emitUsesAllocatorsInit(CodeGenFunction &CGF, const Expr *Allocator,
const Expr *AllocatorTraits);
/// Destroys user defined allocators specified in the uses_allocators clause.
void emitUsesAllocatorsFini(CodeGenFunction &CGF, const Expr *Allocator);
};
/// Class that supports the emission of SIMD-only code.
class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime {
public:
/// Constructs the SIMD-only OpenMP runtime support for module \p CGM.
explicit CGOpenMPSIMDRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {}
~CGOpenMPSIMDRuntime() override {}
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function *
emitParallelOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function *
emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied
/// task region.
/// \param TaskTVar Variable for task_t argument.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param Tied true if task is generated for tied task, false otherwise.
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
llvm::Function *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts) override;
/// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond) override;
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
/// \param Hint Value of the 'hint' clause (optional).
void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc,
const Expr *Hint = nullptr) override;
/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
void emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc) override;
/// Emits code for a taskyield directive.
void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override;
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
void emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc) override;
/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
void emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen, SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps) override;
/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc, bool IsThreads) override;
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if need to emit checks for cancellation barriers.
/// \param ForceSimpleCall true if a simple barrier call must be emitted,
/// false if runtime class decides which one to emit (simple or with
/// cancellation checks).
///
void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind, bool EmitChecks = true,
bool ForceSimpleCall = false) override;
/// This is used for non static scheduled types and when the ordered
/// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
/// For the default (nullptr) value, the chunk 1 will be used.
///
void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind,
unsigned IVSize, bool IVSigned, bool Ordered,
const DispatchRTInput &DispatchValues) override;
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
///
/// This is used only in case of static schedule, when the user did not
/// specify a ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind,
const StaticRTInput &Values) override;
/// Call the appropriate runtime routine to initialize it before start
/// of the distribute loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDistScheduleClauseKind SchedKind,
const StaticRTInput &Values) override;
/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned) override;
/// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind) override;
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned, Address IL,
Address LB, Address UB, Address ST) override;
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads,
SourceLocation Loc) override;
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
void emitProcBindClause(CodeGenFunction &CGF,
llvm::omp::ProcBindKind ProcBind,
SourceLocation Loc) override;
/// Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD,
Address VDAddr, SourceLocation Loc) override;
/// Emit a code for initialization of threadprivate variable. It emits
/// a call to runtime library which adds initial value to the newly created
/// threadprivate variable (if it is not constant) and registers destructor
/// for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
llvm::Function *
emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr) override;
/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
QualType VarType,
StringRef Name) override;
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
SourceLocation Loc, llvm::AtomicOrdering AO) override;
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data) override;
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
/// is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPLoopDirective &D, llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data) override;
/// Emit a code for reduction clause. Next code should be emitted for
/// reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if parent directive has also nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
ReductionOptionsTy Options) override;
/// Emit a code for initialization of task reduction clause. Next code
/// should be emitted for reduction:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
/// \endcode
/// For reduction clause with task modifier it emits the next call:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
/// red_data);
/// \endcode
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data) override;
/// Emits the following code for reduction clause with task modifier:
/// \code
/// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
/// \endcode
void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
bool IsWorksharingReduction) override;
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions + emits threadprivate variable to
/// store the pointer to the original reduction item for the custom
/// initializer defined by declare reduction construct.
/// \param RCG Allows to reuse an existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N) override;
/// Get the address of `void *` type of the private copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *ReductionsPtr,
LValue SharedLVal) override;
/// Emit code for 'taskwait' directive.
void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc) override;
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind CancelRegion) override;
/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
const Expr *IfCond,
OpenMPDirectiveKind CancelRegion) override;
/// Emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Code generation sequence for the \a D directive.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) override;
/// Emit the target offloading code associated with \a D. The emitted
/// code attempts offloading the execution to the device, and in the event of
/// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
/// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive (paired with its device modifier), or null if no device
/// clause is used.
void emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter) override;
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
/// \a GD was dealt with successfully.
/// \param GD Function to scan.
bool emitTargetFunctions(GlobalDecl GD) override;
/// Emit the global variable if it is a valid device global variable.
/// Returns true if \a GD was dealt with successfully.
/// \param GD Variable declaration to emit.
bool emitTargetGlobalVariable(GlobalDecl GD) override;
/// Emit the global \a GD if it is meaningful for the target. Returns
/// true if it was emitted successfully.
/// \param GD Global to scan.
bool emitTargetGlobal(GlobalDecl GD) override;
/// Emits code for teams call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
///
void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars) override;
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
/// \param ThreadLimit An integer expression of threads.
void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc) override;
/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
/// target directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
void emitTargetDataCalls(CodeGenFunction &CGF,
const OMPExecutableDirective &D, const Expr *IfCond,
const Expr *Device, const RegionCodeGenTy &CodeGen,
TargetDataInfo &Info) override;
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond,
const Expr *Device) override;
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations) override;
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C) override;
/// Translates the native parameter of outlined function if this is required
/// for target.
/// \param FD Field decl from captured record for the parameter.
/// \param NativeParam Parameter itself.
const VarDecl *translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const override;
/// Gets the address of the native argument basing on the address of the
/// target-specific parameter.
/// \param NativeParam Parameter itself.
/// \param TargetParam Corresponding target-specific parameter.
Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam,
const VarDecl *TargetParam) const override;
/// Gets the OpenMP-specific address of the local variable.
///
/// This override always returns Address::invalid() — presumably signalling to
/// the caller that this runtime does no special allocation for \a VD and the
/// default local-variable codegen should be used; confirm against the base
/// class contract (NOTE(review)).
Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD) override {
return Address::invalid();
}
};
} // namespace CodeGen
} // namespace clang
#endif
|
coordination.c | /*
* Copyright (C) 2017 by Benedict Paten (benedictpaten@gmail.com)
*
* Released under the MIT license, see LICENSE.txt
*/
#include "margin.h"
// OpenMP
#if defined(_OPENMP)
#include <omp.h>
#define CELL_BUFFER_SIZE 1000
#endif
/*
* Functions to create a set of read partitioning HMMs that include a given input set of reads.
*/
stRPHmm *getNextClosestNonoverlappingHmm(stRPHmm *hmm1, stSortedSet *readHmms) {
    /*
     * Scan readHmms, starting from hmm1, for the hmm that does not overlap
     * hmm1 but whose start coordinate is closest to hmm1's end coordinate.
     * Returns NULL when no such hmm exists.
     */
    assert(stSortedSet_search(readHmms, hmm1) == hmm1);

    // Position an iterator on hmm1 and step past it
    stSortedSetIterator *iterator = stSortedSet_getIteratorFrom(readHmms, hmm1);
    stRPHmm *candidate = stSortedSet_getNext(iterator);
    assert(candidate == hmm1);

    // Walk the hmms whose coordinates sort at or after hmm1's
    while ((candidate = stSortedSet_getNext(iterator)) != NULL) {
        // Sanity check the ordering invariant
        assert(stRPHmm_cmpFn(hmm1, candidate) <= 0);

        // A different reference sequence cannot overlap hmm1, so this
        // candidate is the closest non-overlapping hmm in reference space
        if (strcmp(hmm1->ref->referenceName, candidate->ref->referenceName) != 0) {
            break;
        }

        // Same reference: the first candidate starting at or after hmm1's
        // end coordinate is the closest non-overlapping hmm
        if (hmm1->refStart + hmm1->refLength <= candidate->refStart) {
            break;
        }
    }

    // Cleanup
    stSortedSet_destructIterator(iterator);
    return candidate;
}
stSortedSet *makeComponent(stRPHmm *hmm, stSet *components, stHash *componentsHash) {
    /*
     * Build a new component containing only hmm, register it in the set of
     * components, and record the hmm -> component mapping.
     */
    stSortedSet *newComponent = stSortedSet_construct3(stRPHmm_cmpFn, NULL);
    stSortedSet_insert(newComponent, hmm);
    stSet_insert(components, newComponent);
    // The hmm must not already belong to another component
    assert(stHash_search(componentsHash, hmm) == NULL);
    stHash_insert(componentsHash, hmm, newComponent);
    return newComponent;
}
stSet *getOverlappingComponents(stList *tilingPath1, stList *tilingPath2) {
    /*
     * Two hmms overlap if their reference coordinate intervals overlap.
     * The transitive closure of the overlap relation
     * partitions a set of hmms into connected components.
     * This function returns this partition for the hmms in tilingPath1
     * and tilingPath2, each of which is a list of hmms sorted by reference
     * coordinate and which do not overlap in reference
     * coordinates. Each component is a stSortedSet.
     *
     * Ownership: the returned stSet destructs its component sets (see
     * stSet_construct2 below); the hmms themselves are only referenced.
     */
    // A map of hmms to components (borrowed pointers; destroyed at the end)
    stHash *componentsHash = stHash_construct();
    // The set of components
    stSet *components = stSet_construct2((void (*)(void *))stSortedSet_destruct);
    // The "lagging" index of the hmm in tilingPath2 that could possibly overlap hmm1
    int64_t j = 0;
    // For each hmm in tilingPath1, in order
    for(int64_t i=0; i<stList_length(tilingPath1); i++) {
        stRPHmm *hmm1 = stList_get(tilingPath1, i);
        // Start with the component being undefined
        stSortedSet *component = NULL;
        // The "leading" offset past j of the hmm in tilingPath2 that could possibly overlap hmm1
        int64_t k = 0;
        // While there exists an hmm in tilingPath2 that precedes or overlaps with hmm1
        while(j+k<stList_length(tilingPath2)) {
            stRPHmm *hmm2 = stList_get(tilingPath2, j+k); // Note the j+k
            // If hmm1 and hmm2 overlap
            if(stRPHmm_overlapOnReference(hmm1, hmm2)) {
                // The leading index is increased
                k++;
                // If component is still NULL this is hmm1's first overlap
                if(component == NULL) {
                    // Look for a component for hmm2
                    component = stHash_search(componentsHash, hmm2);
                    // If hmm2 has no component make one
                    if(component == NULL) {
                        component = makeComponent(hmm2, components, componentsHash);
                    }
                    // Add hmm1 to the component
                    assert(stSortedSet_search(component, hmm1) == NULL);
                    assert(stHash_search(componentsHash, hmm1) == NULL);
                    stSortedSet_insert(component, hmm1);
                    stHash_insert(componentsHash, hmm1, component);
                }
                // Otherwise component is defined
                else {
                    // Add hmm2 to the component
                    assert(stSortedSet_search(component, hmm2) == NULL);
                    assert(stHash_search(componentsHash, hmm2) == NULL); // Impossible to be defined,
                    // as that would imply that two
                    // hmms in tilingPath2 each both overlap two hmms in tilingPath1.
                    stSortedSet_insert(component, hmm2);
                    stHash_insert(componentsHash, hmm2, component);
                }
            }
            // Else hmm1 and hmm2 do not overlap
            else {
                // If hmm1 occurs before hmm2 in the reference ordering
                if(stRPHmm_cmpFn(hmm1, hmm2) < 0) {
                    // If hmm1 has no component, make a trivial component containing just hmm1
                    // (it doesn't overlap with any other hmm)
                    if(component == NULL) {
                        component = makeComponent(hmm1, components, componentsHash);
                    }
                    // Done with hmm1
                    break;
                }
                // else hmm2 occurs before hmm1 in the reference ordering
                else {
                    // Add hmm2 to a trivial component if it does not overlap an HMM in tiling path1
                    if(stHash_search(componentsHash, hmm2) == NULL) {
                        makeComponent(hmm2, components, componentsHash);
                    }
                    // Increase the lagging index as hmm1 and proceeding hmms can not overlap with hmm2
                    j++;
                }
            }
        }
        // hmm1 overlapped nothing: give it its own trivial component
        if(component == NULL) {
            assert(stHash_search(componentsHash, hmm1) == NULL);
            makeComponent(hmm1, components, componentsHash);
        }
    }
    // For any remaining hmms in tilingPath2 that have not been placed in a component
    // put them in a component
    while(j < stList_length(tilingPath2)) {
        stRPHmm *hmm2 = stList_get(tilingPath2, j++);
        if(stHash_search(componentsHash, hmm2) == NULL) {
            makeComponent(hmm2, components, componentsHash);
        }
    }
    // Cleanup the hmm -> component map; the components themselves are returned
    stHash_destruct(componentsHash);
    return components;
}
stList *getTilingPaths(stSortedSet *hmms) {
    /*
     * Takes set of hmms ordered by reference coordinate (see stRPHmm_cmpFn) and returns
     * a list of tiling paths. Each tiling path consisting of maximal sequences of hmms
     * that do not overlap. Destroys sortedSet in the process.
     */
    stList *tilingPaths = stList_construct();
    // Greedily peel off one maximal chain of non-overlapping hmms per iteration
    while(stSortedSet_size(hmms) > 0) {
        // Make an empty tiling path and add to set of tiling paths built so far
        stList *tilingPath = stList_construct();
        stList_append(tilingPaths, tilingPath);
        // Get the hmm with lowest reference coordinate and add to the tiling path
        stRPHmm *hmm = stSortedSet_getFirst(hmms);
        assert(hmm != NULL);
        assert(stSortedSet_search(hmms, hmm) == hmm);
        stList_append(tilingPath, hmm);
        // While it exists, get the next closest non-overlapping hmm
        // and add to the tiling path progressively, removing the chain of hmms from the
        // set of hmms left to tile
        stRPHmm *hmm2;
        while((hmm2 = getNextClosestNonoverlappingHmm(hmm, hmms)) != NULL) {
            // Remove the previous hmm only after finding its successor: the
            // search above starts from hmm's position in the set
            stSortedSet_remove(hmms, hmm);
            stList_append(tilingPath, hmm2);
            hmm = hmm2;
            assert(stSortedSet_search(hmms, hmm) == hmm);
        }
        // Remove the last hmm of the chain
        stSortedSet_remove(hmms, hmm);
    }
    // Cleanup the input set; it is empty at this point, so any element
    // destructor it carries does not touch the hmms moved into the paths
    stSortedSet_destruct(hmms);
    return tilingPaths;
}
stList *getTilingPaths2(stList *profileSeqs, stRPHmmParameters *params) {
    /*
     * Build one read-partitioning hmm per profile sequence (stProfileSeq),
     * order them by reference coordinate and partition them into tiling
     * paths of maximal non-overlapping hmm runs.
     */
    stSortedSet *orderedHmms = stSortedSet_construct3(stRPHmm_cmpFn,
            (void (*)(void *))stRPHmm_destruct2);
    int64_t seqCount = stList_length(profileSeqs);
    for(int64_t idx = 0; idx < seqCount; idx++) {
        stProfileSeq *seq = stList_get(profileSeqs, idx);
        stSortedSet_insert(orderedHmms, stRPHmm_construct(seq, params));
    }
    assert(stSortedSet_size(orderedHmms) == seqCount);
    // Organise the HMMs into tiling paths (consumes orderedHmms)
    return getTilingPaths(orderedHmms);
}
stRPHmm *fuseTilingPath(stList *tilingPath) {
    /*
     * Collapse all the hmms of a tiling path into a single fused hmm,
     * consuming and destroying the tiling path.
     */
    stRPHmm *fused = stList_pop(tilingPath);
    // Fuse right-to-left until the list is exhausted
    while(stList_length(tilingPath) > 0) {
        fused = stRPHmm_fuse(stList_pop(tilingPath), fused);
    }
    // Cleanup
    stList_destruct(tilingPath);
    return fused;
}
stList *mergeTwoTilingPaths(stList *tilingPath1, stList *tilingPath2) {
    /*
     * Takes two lists, tilingPath1 and tilingPath2, each of which is a set of hmms
     * ordered by reference coordinates and
     * non-overlapping in reference coordinates.
     * Merges together the hmms and returns a single tiling path as a result in the
     * same format as the input lists.
     * Destroys the input tilingPaths in the process and cleans them up.
     */
    // Partition of the hmms into overlapping connected components
    stSet *components = getOverlappingComponents(tilingPath1, tilingPath2);
    // Cleanup the input tiling paths; the hmms live on inside the components
    stList_destruct(tilingPath1);
    stList_destruct(tilingPath2);
    // The output tiling path, which starts out empty
    stList *newTilingPath = stList_construct();
    // Fuse the hmms
    // For each component of overlapping hmms
    stList *componentsList = stSet_getList(components);
    for(int64_t i=0; i<stList_length(componentsList); i++) {
        stSortedSet *component = stList_get(componentsList, i);
        // Detach from the owning set so the component is not freed twice
        stSet_remove(components, component);
        // Make two sub-tiling paths (there can only be two maximal paths, by definition,
        // since each input tiling path was internally non-overlapping)
        stList *tilingPaths = getTilingPaths(component);
        stRPHmm *hmm = NULL;
        if(stList_length(tilingPaths) == 2) {
            stList *subTilingPath1 = stList_get(tilingPaths, 0);
            stList *subTilingPath2 = stList_get(tilingPaths, 1);
            // Fuse the hmms in each sub tiling path into one hmm each
            stRPHmm *hmm1 = fuseTilingPath(subTilingPath1);
            stRPHmm *hmm2 = fuseTilingPath(subTilingPath2);
            // Align the two hmms so their columns correspond
            stRPHmm_alignColumns(hmm1, hmm2);
            // Merge into the cross product hmm of the two aligned hmms
            hmm = stRPHmm_createCrossProductOfTwoAlignedHmm(hmm1, hmm2);
            stRPHmm_destruct(hmm1, 1);
            stRPHmm_destruct(hmm2, 1);
            // Run forward-backward, then prune the merged hmm
            // (presumably to bound its size — see stRPHmm_prune)
            stRPHmm_forwardBackward(hmm);
            stRPHmm_prune(hmm);
        }
        else { // Case that component is just one hmm that does not
            // overlap anything else
            assert(stList_length(tilingPaths) == 1);
            stList *subTilingPath1 = stList_get(tilingPaths, 0);
            assert(stList_length(subTilingPath1) == 1);
            hmm = stList_pop(subTilingPath1);
            stList_destruct(subTilingPath1);
        }
        // Add to output tiling path
        stList_append(newTilingPath, hmm);
        stList_destruct(tilingPaths);
    }
    // Cleanup
    stList_destruct(componentsList);
    stSet_destruct(components);
    // Sort new tiling path by reference coordinate
    stList_sort(newTilingPath, stRPHmm_cmpFn);
    return newTilingPath;
}
stList *mergeTilingPaths(stList *tilingPaths) {
    /*
     * Like mergeTwoTilingPaths(), except instead of just two tiling paths it takes a list.
     * Destroys the tiling path as it goes.
     */
    // If no tiling paths in input warn and return an empty tiling path
    if(stList_length(tilingPaths) == 0) {
        st_logCritical("WARNING: Zero tiling paths to merge\n");
        stList_destruct(tilingPaths);
        return stList_construct();
    }
    // If only one tiling path in the input, the output is just the single input tiling path
    if(stList_length(tilingPaths) == 1) {
        stList *tilingPath = stList_get(tilingPaths, 0);
        stList_destruct(tilingPaths);
        return tilingPath;
    }
    stList *tilingPath1;
    stList *tilingPath2;
    // If there are more than two tiling paths
    // split the problem into two recursively until there are just two remaining
    // tiling paths
    if(stList_length(tilingPaths) > 2) {
        // Recursively turn the first half of the tiling paths into one tiling path
        stList *tilingPaths1 = stList_construct();
        for(int64_t i=0; i<stList_length(tilingPaths)/2; i++) {
            stList_append(tilingPaths1, stList_get(tilingPaths, i));
        }
        // Recursively turn the other half of the tiling paths into the other tiling path
        stList *tilingPaths2 = stList_construct();
        for(int64_t i=stList_length(tilingPaths)/2; i < stList_length(tilingPaths); i++) {
            stList_append(tilingPaths2, stList_get(tilingPaths, i));
        }
        // The two recursive merges are independent, so run them as parallel
        // OpenMP sections when OpenMP is available
#if defined(_OPENMP)
#pragma omp parallel
        {
#pragma omp sections nowait
            {
#pragma omp section
                tilingPath1 = mergeTilingPaths(tilingPaths1);
#pragma omp section
                tilingPath2 = mergeTilingPaths(tilingPaths2);
            }
        }
#else
        tilingPath1 = mergeTilingPaths(tilingPaths1);
        tilingPath2 = mergeTilingPaths(tilingPaths2);
#endif
    }
    // Otherwise the number of tiling paths is two
    else {
        tilingPath1 = stList_get(tilingPaths, 0);
        tilingPath2 = stList_get(tilingPaths, 1);
    }
    // Merge together the two tiling paths and return result.
    // Safe to destruct tilingPaths here: it was built with stList_construct
    // (no element destructor) and the half-lists above were consumed by the
    // recursive calls.
    assert(tilingPath1 != NULL);
    assert(tilingPath2 != NULL);
    stList_destruct(tilingPaths);
    return mergeTwoTilingPaths(tilingPath1, tilingPath2);
}
static void getProfileSeqs(stList *tilingPath, stList *pSeqs) {
    // Move each profile sequence out of its single-read hmm into pSeqs,
    // destroying the hmms and the tiling path in the process.
    while(stList_length(tilingPath) > 0) {
        stRPHmm *hmm = stList_pop(tilingPath);
        assert(stList_length(hmm->profileSeqs) == 1);
        stList_append(pSeqs, stList_peek(hmm->profileSeqs));
        stRPHmm_destruct(hmm, 1);
    }
    stList_destruct(tilingPath);
}
static int64_t tilingPathSize(stList *tilingPath) {
    /*
     * Returns the sum of the lengths of the profile sequences that the
     * tiling path's (single-read) hmms contain.
     */
    int64_t total = 0;
    int64_t n = stList_length(tilingPath);
    for(int64_t idx = 0; idx < n; idx++) {
        stRPHmm *hmm = stList_get(tilingPath, idx);
        assert(stList_length(hmm->profileSeqs) == 1);
        stProfileSeq *seq = stList_peek(hmm->profileSeqs);
        total += seq->length;
    }
    return total;
}
int tilingPathsCmpFn(stList *tilingPath1, stList *tilingPath2, stHash *tilingPathLengths) {
    // Order tiling paths by their cached total length, longest first
    // (descending): shorter paths sort later.
    int64_t l1 = *(int64_t *)stHash_search(tilingPathLengths, tilingPath1);
    int64_t l2 = *(int64_t *)stHash_search(tilingPathLengths, tilingPath2);
    if(l1 < l2) {
        return 1;
    }
    return l1 > l2 ? -1 : 0;
}
stList *filterReadsByCoverageDepth(stList *profileSeqs, stRPHmmParameters *params,
stList *filteredProfileSeqs, stList *discardedProfileSeqs) {
    /*
     * Takes a set of profile sequences and returns a subset such that maximum coverage depth of the subset is
     * less than or equal to params->maxCoverageDepth. The discarded sequences are placed in the list
     * "discardedProfileSeqs", the retained sequences are placed in filteredProfileSeqs.
     * Returns filteredProfileSeqs (the same list that was passed in).
     */
    // Create a set of tiling paths; the number of paths is the maximum
    // tiling depth of the input reads
    stList *tilingPaths = getTilingPaths2(profileSeqs, params);
    st_logDebug("Got maximum tiling depth of: %i\n", (int)stList_length(tilingPaths));
    // Sort tiling paths by the total length of the reads they include
    // (descending, see tilingPathsCmpFn)
    stHash *tilingPathLengths = stHash_construct2(NULL, free);
    for(int64_t i=0; i<stList_length(tilingPaths); i++) {
        stList *tilingPath = stList_get(tilingPaths, i);
        int64_t *length = st_calloc(1, sizeof(int64_t));
        *length = tilingPathSize(tilingPath);
        stHash_insert(tilingPathLengths, tilingPath, length);
    }
    stList_sort2(tilingPaths, (int (*)(const void *, const void *, const void *))tilingPathsCmpFn, tilingPathLengths);
    stHash_destruct(tilingPathLengths);
    // Discard whole (shortest-last) tiling paths until the coverage depth
    // drops to the configured threshold
    while(stList_length(tilingPaths) > params->maxCoverageDepth) {
        stList *tilingPath = stList_pop(tilingPaths);
        st_logDebug("Discarding %i profiling sequences of total length: %i\n", (int)stList_length(tilingPath), (int)tilingPathSize(tilingPath));
        getProfileSeqs(tilingPath, discardedProfileSeqs);
    }
    // Everything left is retained
    while(stList_length(tilingPaths) > 0) {
        getProfileSeqs(stList_pop(tilingPaths), filteredProfileSeqs);
    }
    // Cleanup
    stList_destruct(tilingPaths);
    st_logInfo("\tFiltered %" PRIi64 " reads of %" PRIi64
    " to achieve maximum coverage depth of %" PRIi64 "\n",
    stList_length(discardedProfileSeqs), stList_length(profileSeqs),
    params->maxCoverageDepth);
    return filteredProfileSeqs;
}
stList *getRPHmms(stList *profileSeqs, stRPHmmParameters *params) {
    /*
     * Takes a set of profile sequences (stProfileSeq) and returns a list of read partitioning
     * hmms (stRPHmm) ordered and non-overlapping in reference coordinates.
     * Aborts the program if the read coverage depth exceeds either
     * MAX_READ_PARTITIONING_DEPTH or params->maxCoverageDepth.
     */
    // Create a read partitioning HMM for every sequence and put in ordered set, ordered by reference coordinate
    stList *tilingPaths = getTilingPaths2(profileSeqs, params);
    // The number of tiling paths is the maximum tiling depth; refuse to
    // proceed if it exceeds the hard or configured maximum
    if(stList_length(tilingPaths) > MAX_READ_PARTITIONING_DEPTH
    || stList_length(tilingPaths) > params->maxCoverageDepth) {
        st_errAbort("\nCoverage depth: read depth of %" PRIi64 " exceeds hard maximum of %" PRIi64
        " with configured maximum of %" PRIi64 "\n",
        stList_length(tilingPaths), MAX_READ_PARTITIONING_DEPTH, params->maxCoverageDepth);
    }
    else {
        st_logDebug("Got %i tiling paths from which to build hmms for max coverage depth of: %i\n", (int)stList_length(tilingPaths), (int)params->maxCoverageDepth);
    }
    // Merge together the tiling paths into one merged tiling path, merging the individual hmms when
    // they overlap on the reference
    stList *finalTilingPath = mergeTilingPaths(tilingPaths);
    // The returned list owns its hmms and destructs them with the list
    stList_setDestructor(finalTilingPath, (void (*)(void *))stRPHmm_destruct2);
    return finalTilingPath;
}
|
main.c | /**
* program: dadafilterbank
*
* Purpose: connect to a ring buffer and create Sigproc output per TAB on request
*
* A ringbuffer page is interpreted as an array of Stokes I:
* [NTABS, NCHANNELS, padded_size] = [12, 1536, > 25000]
*
* Written for the AA-Alert project, ASTRON
*
* Author: Jisk Attema, Netherlands eScience Center
* Licencse: Apache v2.0
*/
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <getopt.h>
#include <errno.h>
#include <signal.h>
#include "dada_hdu.h"
#include "ascii_header.h"
#include "filterbank.h"
#include "config.h"
#define MAXTABS 12
int output[MAXTABS];
FILE *runlog = NULL;
#define LOG(...) {fprintf(stdout, __VA_ARGS__); fprintf(runlog, __VA_ARGS__); fflush(stdout); fflush(runlog);}
// Hardcoded parameters
const unsigned int nchannels = 1536; // Must be divisible by 6 for the current transpose/inverse implementation
const unsigned int nbit = 8;
// Parameters read from ringbuffer header block (with default to lowest data rate)
int science_case = 3;
int science_mode = 2;
int padded_size = 12500;
double min_frequency;
double bandwidth;
double ra;
double dec;
char source_name[256];
double az_start;
double za_start;
double mjd_start;
// Derived parameters (with default to lowest data rate)
double tsamp = 1.024 / 12500;
int ntimes = 12500;
int ntabs = 1;
/**
* Open a connection to the ringbuffer
*
* @param {char *} key String containing the shared memory key as hexadecimal number
* @returns {hdu *} A connected HDU
*/
dada_hdu_t *init_ringbuffer(char *key) {
    /*
     * Open a connection to the ringbuffer.
     *
     * Connects to the PSRDADA HDU identified by the hexadecimal shared-memory
     * key, locks it for reading, reads the first header block and fills the
     * file-scope observation globals (min_frequency, bandwidth, ra, dec,
     * source_name, az_start, za_start, mjd_start, science_case, science_mode,
     * padded_size). Exits the program on any connection error or on a
     * missing required header keyword (all missing keywords are reported
     * before exiting).
     *
     * @param key String containing the shared memory key as hexadecimal number
     * @returns A connected HDU
     */
    uint64_t nbufs;
    int header_incomplete = 0;
    multilog_t* multilog = NULL; // TODO: See if this is used in anyway by dada
    // create hdu
    dada_hdu_t *hdu = dada_hdu_create (multilog);
    // init key
    key_t shmkey;
    // NOTE(review): "%x" expects unsigned int*; this assumes key_t is
    // int-sized here, and the sscanf return is unchecked — confirm on the
    // target platform
    sscanf(key, "%x", &shmkey);
    dada_hdu_set_key(hdu, shmkey);
    LOG("dadafilterbank SHMKEY: %s\n", key);
    // connect
    if (dada_hdu_connect (hdu) < 0) {
        LOG("ERROR in dada_hdu_connect\n");
        exit(EXIT_FAILURE);
    }
    // Make data buffers readable
    if (dada_hdu_lock_read(hdu) < 0) {
        LOG("ERROR in dada_hdu_open_view\n");
        exit(EXIT_FAILURE);
    }
    // get the next full header block to parse
    char *header;
    uint64_t bufsz;
    header = ipcbuf_get_next_read (hdu->header_block, &bufsz);
    if (! header || ! bufsz) {
        LOG("ERROR. Get next header block error\n");
        exit(EXIT_FAILURE);
    }
    // parse header: each required keyword sets header_incomplete when absent
    if(ascii_header_get(header, "MIN_FREQUENCY", "%lf", &min_frequency) == -1) {
        LOG("ERROR. MIN_FREQUENCY not set in dada buffer\n");
        header_incomplete = 1;
    }
    if(ascii_header_get(header, "BW", "%lf", &bandwidth) == -1) {
        LOG("ERROR. BW not set in dada buffer\n");
        header_incomplete = 1;
    }
    if(ascii_header_get(header, "RA", "%lf", &ra) == -1) {
        LOG("ERROR. RA not set in dada buffer\n");
        header_incomplete = 1;
    }
    if(ascii_header_get(header, "DEC", "%lf", &dec) == -1) {
        LOG("ERROR. DEC not set in dada buffer\n");
        header_incomplete = 1;
    }
    if(ascii_header_get(header, "SOURCE", "%s", source_name) == -1) {
        LOG("ERROR. SOURCE not set in dada buffer\n");
        header_incomplete = 1;
    }
    if(ascii_header_get(header, "AZ_START", "%lf", &az_start) == -1) {
        LOG("ERROR. AZ_START not set in dada buffer\n");
        header_incomplete = 1;
    }
    if(ascii_header_get(header, "ZA_START", "%lf", &za_start) == -1) {
        LOG("ERROR. ZA_START not set in dada buffer\n");
        header_incomplete = 1;
    }
    if(ascii_header_get(header, "MJD_START", "%lf", &mjd_start) == -1) {
        LOG("ERROR. MJD_START not set in dada buffer\n");
        header_incomplete = 1;
    }
    if(ascii_header_get(header, "SCIENCE_CASE", "%i", &science_case) == -1) {
        LOG("ERROR. SCIENCE_CASE not set in dada buffer\n");
        header_incomplete = 1;
    }
    if(ascii_header_get(header, "SCIENCE_MODE", "%i", &science_mode) == -1) {
        LOG("ERROR. SCIENCE_MODE not set in dada buffer\n");
        header_incomplete = 1;
    }
    if(ascii_header_get(header, "PADDED_SIZE", "%i", &padded_size) == -1) {
        LOG("ERROR. PADDED_SIZE not set in dada buffer\n");
        header_incomplete = 1;
    }
    // tell the ringbuffer the header has been read
    if (ipcbuf_mark_cleared(hdu->header_block) < 0) {
        LOG("ERROR. Cannot mark the header as cleared\n");
        exit(EXIT_FAILURE);
    }
    LOG("psrdada HEADER:\n%s\n", header);
    // Only exit after logging the complete header so the log shows context
    if (header_incomplete) {
        exit(EXIT_FAILURE);
    }
    return hdu;
}
/**
 * Emit command-line usage help on stdout.
 */
void printOptions() {
    fputs("usage: dadafilterbank -k <hexadecimal key> -l <logfile> -n <filename prefix for dumps>\n", stdout);
    fputs("e.g. dadafits -k dada -l log.txt -n myobs\n", stdout);
}
/**
 * Parse commandline.
 *
 * Recognized options:
 *   -k <hexadecimal key>  ringbuffer shared-memory key (required)
 *   -l <logfile>          log file (required)
 *   -n <prefix>           filename prefix for dumps (required)
 *   -h                    print usage and exit
 *
 * The returned strings are strdup'd; the caller owns them.
 * Exits with failure when a required option is missing or unknown.
 */
void parseOptions(int argc, char *argv[], char **key, char **prefix, char **logfile) {
    int c;
    int setk=0, setl=0, setn=0;
    // BUGFIX: 'h' was missing from the optstring, so getopt() returned '?'
    // for -h and the help branch below was unreachable.
    while((c=getopt(argc,argv,"b:c:m:k:l:n:h"))!=-1) {
        switch(c) {
            // -k <hexadecimal_key>
            case('k'):
                *key = strdup(optarg);
                setk=1;
                break;
            // -l log file
            case('l'):
                *logfile = strdup(optarg);
                setl=1;
                break;
            // -n <filename prefix>
            case('n'):
                setn=1;
                *prefix = strdup(optarg);
                break;
            // -h
            case('h'):
                printOptions();
                exit(EXIT_SUCCESS);
                break;
            default:
                fprintf(stderr, "Unknown option '%c'\n", c);
                exit(EXIT_FAILURE);
                break;
        }
    }
    // All arguments are required
    if (!setk || !setl || !setn) {
        if (!setk) fprintf(stderr, "Error: DADA key not set\n");
        if (!setl) fprintf(stderr, "Error: Log file not set\n");
        if (!setn) fprintf(stderr, "Error: Filename prefix not set\n");
        exit(EXIT_FAILURE);
    }
}
void open_files(char *prefix, int ntabs) {
    // Create one Sigproc filterbank output file per tied-array beam (TAB),
    // storing the file handles in the global output[] array.
    for (int tab = 0; tab < ntabs; tab++) {
        char fname[256];
        // A single beam gets an unsuffixed file name; multiple beams are
        // numbered with a two-digit suffix
        if (ntabs == 1) {
            snprintf(fname, 256, "%s.fil", prefix);
        } else {
            snprintf(fname, 256, "%s_%02i.fil", prefix, tab);
        }
        // open filterbank file with the observation parameters read from the
        // ringbuffer header
        output[tab] = filterbank_create(
            fname,                                                // filename
            10,                                                   // int telescope_id,
            15,                                                   // int machine_id,
            source_name,                                          // char *source_name,
            az_start,                                             // double az_start,
            za_start,                                             // double za_start,
            ra,                                                   // double src_raj,
            dec,                                                  // double src_dej,
            mjd_start,                                            // double tstart
            tsamp,                                                // double tsamp,
            nbit,                                                 // int nbits,
            min_frequency + bandwidth - (bandwidth / nchannels),  // double fch1,
            -1 * bandwidth / nchannels,                           // double foff,
            nchannels,                                            // int nchans,
            ntabs,                                                // int nbeams,
            tab,                                                  // int ibeam
            1                                                     // int nifs
        );
    }
}
void close_files() {
    // Close every per-TAB filterbank file (ntabs is the global beam count).
    for (int tab = 0; tab < ntabs; tab++) {
        filterbank_close(output[tab]);
    }
}
/**
 * Catch SIGINT then sync and close files before exiting.
 *
 * NOTE(review): fprintf/fflush (via LOG), filterbank_close and exit() are
 * not async-signal-safe (fsync is). Tolerable here since the process is
 * aborting anyway, but worth confirming against the project's signal policy.
 */
void sigint_handler (int sig) {
    LOG("SIGINT received, aborting\n");
    int i;
    // Flush and close every per-TAB output file that was opened
    for (i=0; i<ntabs; i++) {
        if (output[i]) {
            fsync(output[i]);
            filterbank_close(output[i]);
        }
    }
    exit(EXIT_FAILURE);
}
int main (int argc, char *argv[]) {
char *key;
char *logfile;
char *file_prefix;
// parse commandline
parseOptions(argc, argv, &key, &file_prefix, &logfile);
// set up logging
if (logfile) {
runlog = fopen(logfile, "w");
if (! runlog) {
LOG("ERROR opening logfile: %s\n", logfile);
exit(EXIT_FAILURE);
}
LOG("Logging to logfile: %s\n", logfile);
free (logfile);
}
// connect to ring buffer
dada_hdu_t *ringbuffer = init_ringbuffer(key);
ipcbuf_t *data_block = (ipcbuf_t *) ringbuffer->data_block;
ipcio_t *ipc = ringbuffer->data_block;
if (science_case == 3) {
// NTIMES (12500) per 1.024 seconds -> 0.00008192 [s]
ntimes = 12500;
tsamp = 1.024 / 12500;
ntabs = 9;
} else if (science_case == 4) {
// NTIMES (12500) per 1.024 seconds -> 0.00008192 [s]
ntimes = 12500;
tsamp = 1.024 / 12500;
ntabs = 12;
} else {
LOG("Error: Illegal science case '%i'", science_mode);
exit(EXIT_FAILURE);
}
LOG("dadafilterbank version: " VERSION "\n");
LOG("Science case = %i\n", science_case);
LOG("Filename prefix = %s\n", file_prefix);
if (science_mode == 0) {
// I + TAB
LOG("Science mode: 0 [I + TAB]\n");
} else if (science_mode == 2) {
// I + IAB
// Overwrite NTABS to be one
ntabs = 1;
LOG("Science mode: 2 [I + IAB]\n");
} else if (science_mode == 1 || science_mode == 3) {
LOG("Error: modes 1 [IQUV + TAB] / 3 [IQUV + IAB] not supported");
exit(EXIT_FAILURE);
} else {
LOG("Error: Illegal science mode '%i'", science_mode);
exit(EXIT_FAILURE);
}
// create filterbank files, and close files on C-c
open_files(file_prefix, ntabs);
signal(SIGINT, sigint_handler);
// for interaction with ringbuffer
uint64_t bufsz = ipc->curbufsz;
char *page = NULL;
// for processing a page
int tab, channel, time;
char *buffer = malloc(ntabs * ntimes * nchannels * sizeof(char));
int page_count = 0;
int quit = 0;
while(!quit && !ipcbuf_eod(data_block)) {
page = ipcbuf_get_next_read(data_block, &bufsz);
if (! page) {
quit = 1;
} else {
// page [NTABS, nchannels, time(padded_size)]
// file [time, nchannels]
for (tab = 0; tab < ntabs; tab++) {
int channel;
#pragma omp parallel for
for (channel = 0; channel < nchannels; channel+=6) {
const char *channelA = &page[(tab*nchannels + channel + 0)*padded_size];
const char *channelB = &page[(tab*nchannels + channel + 1)*padded_size];
const char *channelC = &page[(tab*nchannels + channel + 2)*padded_size];
const char *channelD = &page[(tab*nchannels + channel + 3)*padded_size];
const char *channelE = &page[(tab*nchannels + channel + 4)*padded_size];
const char *channelF = &page[(tab*nchannels + channel + 5)*padded_size];
int time;
for (time = 0; time < ntimes; time++) {
// reverse freq order to comply with header
buffer[tab*ntimes*nchannels + time*nchannels+nchannels-(channel+0)-1] = channelA[time];
buffer[tab*ntimes*nchannels + time*nchannels+nchannels-(channel+1)-1] = channelB[time];
buffer[tab*ntimes*nchannels + time*nchannels+nchannels-(channel+2)-1] = channelC[time];
buffer[tab*ntimes*nchannels + time*nchannels+nchannels-(channel+3)-1] = channelD[time];
buffer[tab*ntimes*nchannels + time*nchannels+nchannels-(channel+4)-1] = channelE[time];
buffer[tab*ntimes*nchannels + time*nchannels+nchannels-(channel+5)-1] = channelF[time];
}
}
ssize_t size = write(output[tab], &buffer[tab*ntimes*nchannels], sizeof(char) * ntimes * nchannels);
}
ipcbuf_mark_cleared((ipcbuf_t *) ipc);
page_count++;
}
}
if (ipcbuf_eod(data_block)) {
LOG("End of data received\n");
}
dada_hdu_unlock_read(ringbuffer);
dada_hdu_disconnect(ringbuffer);
free(buffer);
LOG("Read %i pages\n", page_count);
}
|
mat_mul_simd_9000.c | /*
* file for mat_mul.c
*/
#include "./mat_mul.h"
#include "./size.h"
void mat_mul(int *a, int *b, int *c);
void mat_mul(int *a, int *b, int *c)
{
    /*
     * Multiply-accumulate kernel over 9000x9000 int matrices stored
     * row-major in flat arrays.
     *
     * NOTE(review): for each (i,j) this accumulates a[i*9000+k]*b[j*9000+k],
     * i.e. b is indexed as if transposed (c = a * b^T), and the inner t loop
     * adds the same product 100 times. This looks like generated benchmark
     * code; confirm the intended semantics before "fixing" either oddity.
     */
    int i, j, k, t;
    // The simd hint binds to the i loop; j, t and k are declared private
    #pragma omp simd private(j, t, k)
    for(i = 0; i <= 8999; i += 1)
        for(j = 0; j <= 8999; j += 1) {
            c[i*9000+j] = 0;
            for(k = 0; k <= 8999; k += 1)
                for(t = 0; t <= 99; t += 1)
                    c[i*9000+j] += a[i*9000+k]*b[j*9000+k];
        }
    return;
}
|
nested_lwt.c | // RUN: %libomp-compile-and-run | FileCheck %s
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck --check-prefix=THREADS %s
// REQUIRES: ompt
#include "callback.h"
#include <omp.h>
int main()
{
omp_set_nested(1);
#pragma omp parallel num_threads(4)
{
print_ids(0);
print_ids(1);
#pragma omp parallel num_threads(1)
{
print_ids(0);
print_ids(1);
print_ids(2);
#pragma omp parallel num_threads(4)
{
print_ids(0);
print_ids(1);
print_ids(2);
print_ids(3);
}
}
}
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, parallel_function=0x{{[0-f]+}}, invoker=[[PARALLEL_INVOKER:.+]]
// CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// Note that we cannot ensure that the worker threads have already called barrier_end and implicit_task_end before parallel_end!
// CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
// THREADS: 0: NULL_POINTER=[[NULL:.*$]]
// THREADS: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=[[TASK_FRAME_ENTER:0x[0-f]+]], parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, parallel_function=0x{{[0-f]+}}, invoker=[[PARALLEL_INVOKER:.+]]
// nested parallel masters
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[MASTER_ID]]: level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NESTED_TASK_FRAME_EXIT:0x[0-f]+]], reenter_frame=[[NULL]]
// THREADS: {{^}}[[MASTER_ID]]: level 1: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[TASK_FRAME_ENTER]]
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit=[[NESTED_TASK_FRAME_EXIT]], parent_task_frame.reenter=[[NESTED_TASK_FRAME_ENTER:0x[0-f]+]], parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=1, parallel_function=[[NESTED_PARALLEL_FUNCTION:0x[0-f]+]], invoker=[[PARALLEL_INVOKER]]
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[MASTER_ID]]: level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame=[[NESTED_NESTED_TASK_FRAME_EXIT:0x[0-f]+]], reenter_frame=[[NULL]]
// THREADS: {{^}}[[MASTER_ID]]: level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NESTED_TASK_FRAME_EXIT]], reenter_frame=[[NESTED_TASK_FRAME_ENTER]]
// THREADS: {{^}}[[MASTER_ID]]: level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[TASK_FRAME_ENTER]]
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[NESTED_IMPLICIT_TASK_ID]], parent_task_frame.exit=[[NESTED_NESTED_TASK_FRAME_EXIT]], parent_task_frame.reenter=[[NESTED_NESTED_TASK_FRAME_ENTER:0x[0-f]+]], parallel_id=[[NESTED_NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=4, parallel_function=[[NESTED_NESTED_PARALLEL_FUNCTION:0x[0-f]+]], invoker=[[PARALLEL_INVOKER]]
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_NESTED_PARALLEL_ID]], task_id=[[NESTED_NESTED_IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[MASTER_ID]]: level 0: parallel_id=[[NESTED_NESTED_PARALLEL_ID]], task_id=[[NESTED_NESTED_IMPLICIT_TASK_ID]], exit_frame=[[NESTED_NESTED_NESTED_TASK_FRAME_EXIT:0x[0-f]+]], reenter_frame=[[NULL]]
// THREADS: {{^}}[[MASTER_ID]]: level 1: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame=[[NESTED_NESTED_TASK_FRAME_EXIT]], reenter_frame=[[NESTED_NESTED_TASK_FRAME_ENTER]]
// THREADS: {{^}}[[MASTER_ID]]: level 2: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NESTED_TASK_FRAME_EXIT]], reenter_frame=[[NESTED_TASK_FRAME_ENTER]]
// THREADS: {{^}}[[MASTER_ID]]: level 3: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[TASK_FRAME_ENTER]]
// THREADS-NOT: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_NESTED_PARALLEL_ID]], task_id=[[NESTED_NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id=[[NESTED_NESTED_PARALLEL_ID]], task_id=[[NESTED_NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=[[NESTED_NESTED_PARALLEL_ID]], task_id=[[NESTED_NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
// THREADS-NOT: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
// THREADS-NOT: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NESTED_TASK_FRAME_EXIT:0x[0-f]+]], reenter_frame=[[NULL]]
// THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[TASK_FRAME_ENTER]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit=[[NESTED_TASK_FRAME_EXIT]], parent_task_frame.reenter=[[NESTED_TASK_FRAME_ENTER:0x[0-f]+]], parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=1, parallel_function=[[NESTED_PARALLEL_FUNCTION]], invoker=[[PARALLEL_INVOKER]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame=[[NESTED_NESTED_TASK_FRAME_EXIT:0x[0-f]+]], reenter_frame=[[NULL]]
// THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NESTED_TASK_FRAME_EXIT]], reenter_frame=[[NESTED_TASK_FRAME_ENTER]]
// THREADS: {{^}}[[THREAD_ID]]: level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[TASK_FRAME_ENTER]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_begin: parent_task_id=[[NESTED_IMPLICIT_TASK_ID]], parent_task_frame.exit=[[NESTED_NESTED_TASK_FRAME_EXIT]], parent_task_frame.reenter=[[NESTED_NESTED_TASK_FRAME_ENTER:0x[0-f]+]], parallel_id=[[NESTED_NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=4, parallel_function=[[NESTED_NESTED_PARALLEL_FUNCTION]], invoker=[[PARALLEL_INVOKER]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_NESTED_PARALLEL_ID]], task_id=[[NESTED_NESTED_IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[NESTED_NESTED_PARALLEL_ID]], task_id=[[NESTED_NESTED_IMPLICIT_TASK_ID]], exit_frame=[[NESTED_NESTED_NESTED_TASK_FRAME_EXIT:0x[0-f]+]], reenter_frame=[[NULL]]
// THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame=[[NESTED_NESTED_TASK_FRAME_EXIT]], reenter_frame=[[NESTED_NESTED_TASK_FRAME_ENTER]]
// THREADS: {{^}}[[THREAD_ID]]: level 2: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NESTED_TASK_FRAME_EXIT]], reenter_frame=[[NESTED_TASK_FRAME_ENTER]]
// THREADS: {{^}}[[THREAD_ID]]: level 3: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[TASK_FRAME_ENTER]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_NESTED_PARALLEL_ID]], task_id=[[NESTED_NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[NESTED_NESTED_PARALLEL_ID]], task_id=[[NESTED_NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[NESTED_NESTED_PARALLEL_ID]], task_id=[[NESTED_NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NESTED_TASK_FRAME_EXIT:0x[0-f]+]], reenter_frame=[[NULL]]
// THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[TASK_FRAME_ENTER]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit=[[NESTED_TASK_FRAME_EXIT]], parent_task_frame.reenter=[[NESTED_TASK_FRAME_ENTER:0x[0-f]+]], parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=1, parallel_function=[[NESTED_PARALLEL_FUNCTION]], invoker=[[PARALLEL_INVOKER]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame=[[NESTED_NESTED_TASK_FRAME_EXIT:0x[0-f]+]], reenter_frame=[[NULL]]
// THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NESTED_TASK_FRAME_EXIT]], reenter_frame=[[NESTED_TASK_FRAME_ENTER]]
// THREADS: {{^}}[[THREAD_ID]]: level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[TASK_FRAME_ENTER]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_begin: parent_task_id=[[NESTED_IMPLICIT_TASK_ID]], parent_task_frame.exit=[[NESTED_NESTED_TASK_FRAME_EXIT]], parent_task_frame.reenter=[[NESTED_NESTED_TASK_FRAME_ENTER:0x[0-f]+]], parallel_id=[[NESTED_NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=4, parallel_function=[[NESTED_NESTED_PARALLEL_FUNCTION]], invoker=[[PARALLEL_INVOKER]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_NESTED_PARALLEL_ID]], task_id=[[NESTED_NESTED_IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[NESTED_NESTED_PARALLEL_ID]], task_id=[[NESTED_NESTED_IMPLICIT_TASK_ID]], exit_frame=[[NESTED_NESTED_NESTED_TASK_FRAME_EXIT:0x[0-f]+]], reenter_frame=[[NULL]]
// THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame=[[NESTED_NESTED_TASK_FRAME_EXIT]], reenter_frame=[[NESTED_NESTED_TASK_FRAME_ENTER]]
// THREADS: {{^}}[[THREAD_ID]]: level 2: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NESTED_TASK_FRAME_EXIT]], reenter_frame=[[NESTED_TASK_FRAME_ENTER]]
// THREADS: {{^}}[[THREAD_ID]]: level 3: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[TASK_FRAME_ENTER]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_NESTED_PARALLEL_ID]], task_id=[[NESTED_NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[NESTED_NESTED_PARALLEL_ID]], task_id=[[NESTED_NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[NESTED_NESTED_PARALLEL_ID]], task_id=[[NESTED_NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NESTED_TASK_FRAME_EXIT:0x[0-f]+]], reenter_frame=[[NULL]]
// THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[TASK_FRAME_ENTER]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit=[[NESTED_TASK_FRAME_EXIT]], parent_task_frame.reenter=[[NESTED_TASK_FRAME_ENTER:0x[0-f]+]], parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=1, parallel_function=[[NESTED_PARALLEL_FUNCTION]], invoker=[[PARALLEL_INVOKER]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame=[[NESTED_NESTED_TASK_FRAME_EXIT:0x[0-f]+]], reenter_frame=[[NULL]]
// THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NESTED_TASK_FRAME_EXIT]], reenter_frame=[[NESTED_TASK_FRAME_ENTER]]
// THREADS: {{^}}[[THREAD_ID]]: level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[TASK_FRAME_ENTER]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_begin: parent_task_id=[[NESTED_IMPLICIT_TASK_ID]], parent_task_frame.exit=[[NESTED_NESTED_TASK_FRAME_EXIT]], parent_task_frame.reenter=[[NESTED_NESTED_TASK_FRAME_ENTER:0x[0-f]+]], parallel_id=[[NESTED_NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=4, parallel_function=[[NESTED_NESTED_PARALLEL_FUNCTION]], invoker=[[PARALLEL_INVOKER]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_NESTED_PARALLEL_ID]], task_id=[[NESTED_NESTED_IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[NESTED_NESTED_PARALLEL_ID]], task_id=[[NESTED_NESTED_IMPLICIT_TASK_ID]], exit_frame=[[NESTED_NESTED_NESTED_TASK_FRAME_EXIT:0x[0-f]+]], reenter_frame=[[NULL]]
// THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame=[[NESTED_NESTED_TASK_FRAME_EXIT]], reenter_frame=[[NESTED_NESTED_TASK_FRAME_ENTER]]
// THREADS: {{^}}[[THREAD_ID]]: level 2: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NESTED_TASK_FRAME_EXIT]], reenter_frame=[[NESTED_TASK_FRAME_ENTER]]
// THREADS: {{^}}[[THREAD_ID]]: level 3: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[TASK_FRAME_ENTER]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_NESTED_PARALLEL_ID]], task_id=[[NESTED_NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[NESTED_NESTED_PARALLEL_ID]], task_id=[[NESTED_NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[NESTED_NESTED_PARALLEL_ID]], task_id=[[NESTED_NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// nested parallel worker threads
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// can't reliably tell which parallel region is the parent...
// THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: level 2: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: level 3: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[TASK_FRAME_ENTER]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// can't reliably tell which parallel region is the parent...
// THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: level 2: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: level 3: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[TASK_FRAME_ENTER]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// can't reliably tell which parallel region is the parent...
// THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: level 2: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: level 3: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[TASK_FRAME_ENTER]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// can't reliably tell which parallel region is the parent...
// THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: level 2: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: level 3: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[TASK_FRAME_ENTER]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// can't reliably tell which parallel region is the parent...
// THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: level 2: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: level 3: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[TASK_FRAME_ENTER]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// can't reliably tell which parallel region is the parent...
// THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: level 2: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: level 3: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[TASK_FRAME_ENTER]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// can't reliably tell which parallel region is the parent...
// THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: level 2: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: level 3: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[TASK_FRAME_ENTER]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// can't reliably tell which parallel region is the parent...
// THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: level 2: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: level 3: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[TASK_FRAME_ENTER]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// can't reliably tell which parallel region is the parent...
// THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: level 2: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: level 3: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[TASK_FRAME_ENTER]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// can't reliably tell which parallel region is the parent...
// THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: level 2: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: level 3: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[TASK_FRAME_ENTER]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// can't reliably tell which parallel region is the parent...
// THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: level 2: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: level 3: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[TASK_FRAME_ENTER]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// can't reliably tell which parallel region is the parent...
// THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: level 2: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: level 3: parallel_id=0, task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[TASK_FRAME_ENTER]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
return 0;
}
|
integrator_whfasthelio.c | /**
* @file integrator_whfasthelio.c
* @brief WHFASTHELIO integration scheme.
* @author Hanno Rein <hanno@hanno-rein.de>
* @details This file implements the WHFast integration scheme in
* Heliocentric Coordinates.
* Based on WHFast, described in Rein & Tamayo 2015.
*
* @section LICENSE
* Copyright (c) 2016 Hanno Rein, Daniel Tamayo
*
* This file is part of rebound.
*
* rebound is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* rebound is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with rebound. If not, see <http://www.gnu.org/licenses/>.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include <sys/time.h>
#include "rebound.h"
#include "particle.h"
#include "gravity.h"
#include "integrator.h"
#include "integrator_whfast.h"
#include "integrator_whfasthelio.h"
/******************************
* Coordinate transformations */
/* Convert inertial-frame particles into heliocentric coordinates.
 * Slot 0 of p_h receives the centre of mass (position, velocity, total mass).
 * Slots 1..N-1 receive positions relative to particle 0 (the star) and
 * velocities relative to the centre of mass — the democratic heliocentric
 * splitting used by this integrator. Only the fields written here are
 * touched; any other fields of p_h are left as-is. */
static void to_helio_posvel(const struct reb_particle* const particles, struct reb_particle* const p_h, const int N){
    double cx = 0., cy = 0., cz = 0.;       // mass-weighted position sums
    double cvx = 0., cvy = 0., cvz = 0.;    // mass-weighted velocity sums
    double mtot = 0.;                       // total mass
    for (unsigned int i=0;i<N;i++){
        const double mi = particles[i].m;
        cx  += particles[i].x *mi;
        cy  += particles[i].y *mi;
        cz  += particles[i].z *mi;
        cvx += particles[i].vx*mi;
        cvy += particles[i].vy*mi;
        cvz += particles[i].vz*mi;
        mtot += mi;
    }
    p_h[0].x  = cx /mtot;
    p_h[0].y  = cy /mtot;
    p_h[0].z  = cz /mtot;
    p_h[0].vx = cvx/mtot;
    p_h[0].vy = cvy/mtot;
    p_h[0].vz = cvz/mtot;
    p_h[0].m  = mtot;
    for (unsigned int i=1;i<N;i++){
        // Positions: heliocentric (relative to the star).
        p_h[i].x  = particles[i].x  - particles[0].x;
        p_h[i].y  = particles[i].y  - particles[0].y;
        p_h[i].z  = particles[i].z  - particles[0].z;
        // Velocities: relative to the centre of mass.
        p_h[i].vx = particles[i].vx - p_h[0].vx;
        p_h[i].vy = particles[i].vy - p_h[0].vy;
        p_h[i].vz = particles[i].vz - p_h[0].vz;
        p_h[i].m  = particles[i].m;
    }
}
/* Recover inertial-frame positions from heliocentric coordinates.
 * The star's position is found by subtracting the mass-weighted planet
 * offsets from the centre-of-mass position stored in p_h[0]; each planet
 * is then placed at its heliocentric offset from the star. Velocities
 * are not touched here (see to_inertial_posvel). */
static void to_inertial_pos(struct reb_particle* const particles, const struct reb_particle* const p_h, const int N){
    const double mtot = p_h[0].m;   // total system mass, cached in slot 0
    double sx = p_h[0].x;
    double sy = p_h[0].y;
    double sz = p_h[0].z;
    for (unsigned int i=1;i<N;i++){
        sx -= p_h[i].x*particles[i].m/mtot;
        sy -= p_h[i].y*particles[i].m/mtot;
        sz -= p_h[i].z*particles[i].m/mtot;
    }
    particles[0].x = sx;
    particles[0].y = sy;
    particles[0].z = sz;
    for (unsigned int i=1;i<N;i++){
        particles[i].x = p_h[i].x + sx;
        particles[i].y = p_h[i].y + sy;
        particles[i].z = p_h[i].z + sz;
    }
}
static void to_inertial_posvel(struct reb_particle* const particles, const struct reb_particle* const p_h, const int N){
to_inertial_pos(particles,p_h,N);
const double mtot = p_h[0].m;
const double m0 = particles[0].m;
for (unsigned int i=1;i<N;i++){
particles[i].vx = p_h[i].vx+p_h[0].vx;
particles[i].vy = p_h[i].vy+p_h[0].vy;
particles[i].vz = p_h[i].vz+p_h[0].vz;
}
particles[0].vx = p_h[0].vx*mtot/m0;
particles[0].vy = p_h[0].vy*mtot/m0;
particles[0].vz = p_h[0].vz*mtot/m0;
for (unsigned int i=1;i<N;i++){
particles[0].vx -= particles[i].vx*particles[i].m/m0;
particles[0].vy -= particles[i].vy*particles[i].m/m0;
particles[0].vz -= particles[i].vz*particles[i].m/m0;
}
}
/*****************************
* Operators */
/* Jump step: kick the heliocentric positions of all planets by the
 * planets' total momentum divided by the stellar mass, for time _dt.
 * Slot 0 (centre of mass) is untouched. */
static void reb_whfasthelio_jump_step(const struct reb_simulation* const r, double _dt){
    const double m0 = r->particles[0].m;
    const int N_real = r->N-r->N_var;
    struct reb_particle* const p_h = r->ri_whfasthelio.p_h;
    double mom_x = 0;
    double mom_y = 0;
    double mom_z = 0;
    /* Total momentum of the planets in heliocentric coordinates. */
    for (unsigned int k=1;k<N_real;k++){
        mom_x += p_h[k].m* p_h[k].vx;
        mom_y += p_h[k].m* p_h[k].vy;
        mom_z += p_h[k].m* p_h[k].vz;
    }
    /* Apply the same drift to every planet. */
    for (unsigned int k=1;k<N_real;k++){
        p_h[k].x += _dt * mom_x/m0;
        p_h[k].y += _dt * mom_y/m0;
        p_h[k].z += _dt * mom_z/m0;
    }
}
/* Interaction step: kick heliocentric velocities with the accelerations
 * previously evaluated on the inertial-frame particles. */
static void reb_whfasthelio_interaction_step(const struct reb_simulation* const r, const double _dt){
    struct reb_particle* const inertial = r->particles;
    const int N_real = r->N-r->N_var;
    struct reb_particle* const p_h = r->ri_whfasthelio.p_h;
    for (unsigned int k=1;k<N_real;k++){
        p_h[k].vx += _dt*inertial[k].ax;
        p_h[k].vy += _dt*inertial[k].ay;
        p_h[k].vz += _dt*inertial[k].az;
    }
}
/* Kepler drift: advance each heliocentric orbit around the central mass
 * GM = m0*G for time _dt via kepler_step (external solver; see the
 * WHFast includes at the top of this file), then drift the centre of
 * mass (slot 0) linearly. */
static void reb_whfasthelio_keplerstep(const struct reb_simulation* const r, const double _dt){
const int N_real = r->N-r->N_var;
struct reb_particle* const p_h = r->ri_whfasthelio.p_h;
const double M = r->particles[0].m*r->G;
/* Orbits are independent of each other, so this parallelizes cleanly. */
#pragma omp parallel for
for (unsigned int i=1;i<N_real;i++){
kepler_step(r, p_h, M, i, _dt);
}
/* The centre of mass moves uniformly. */
p_h[0].x += _dt*p_h[0].vx;
p_h[0].y += _dt*p_h[0].vy;
p_h[0].z += _dt*p_h[0].vz;
}
/*****************************
* Correctors */
/* Corrector kernel Z(a,b): a symmetric composition of Kepler drifts of
 * size +a/-2a/+a interleaved with interaction and jump kicks of size
 * -b/+b.  Passed to reb_whfast_apply_corrector() to build higher-order
 * symplectic correctors.  The statement order is essential; do not
 * reorder. */
static void reb_whfasthelio_corrector_Z(struct reb_simulation* r, const double a, const double b){
struct reb_simulation_integrator_whfasthelio* const ri_whfasthelio = &(r->ri_whfasthelio);
struct reb_particle* restrict const particles = r->particles;
const int N_real = r->N-r->N_var;
reb_whfasthelio_keplerstep(r, a);
/* Kicks need forces, which are evaluated on inertial positions. */
to_inertial_pos(particles, ri_whfasthelio->p_h, N_real);
reb_update_acceleration(r);
reb_whfasthelio_interaction_step(r,-b);
reb_whfasthelio_jump_step(r,-b);
reb_whfasthelio_keplerstep(r, -2.*a);
to_inertial_pos(particles, ri_whfasthelio->p_h, N_real);
reb_update_acceleration(r);
reb_whfasthelio_interaction_step(r,b);
reb_whfasthelio_jump_step(r,b);
reb_whfasthelio_keplerstep(r, a);
}
/* First half of a WHFastHELIO timestep.  (Re)allocates and, when
 * requested, recomputes the heliocentric coordinate buffer, applies the
 * symplectic corrector, performs the first (or, if still unsynchronized,
 * a combined full) Kepler drift, and converts back to inertial
 * coordinates for the force evaluation.  Advances the clock by dt/2. */
void reb_integrator_whfasthelio_part1(struct reb_simulation* const r){
if (r->var_config_N){
reb_exit("WHFastHELIO does currently not work with variational equations.");
}
struct reb_simulation_integrator_whfasthelio* const ri_whfasthelio = &(r->ri_whfasthelio);
struct reb_particle* restrict const particles = r->particles;
const int N_real = r->N - r->N_var;
/* Tell the gravity module to skip the terms handled analytically here. */
r->gravity_ignore_terms = 2;
/* Resize the heliocentric buffer if the particle count changed. */
if (ri_whfasthelio->allocated_N != N_real){
ri_whfasthelio->allocated_N = N_real;
/* NOTE(review): realloc result is unchecked -- OOM would crash here. */
ri_whfasthelio->p_h = realloc(ri_whfasthelio->p_h,sizeof(struct reb_particle)*N_real);
ri_whfasthelio->recalculate_heliocentric_this_timestep = 1;
}
if (ri_whfasthelio->safe_mode || ri_whfasthelio->recalculate_heliocentric_this_timestep == 1){
if (ri_whfasthelio->is_synchronized==0){
/* Finish the pending half-drift before rebuilding coordinates,
 * and warn once: the user changed particles mid-step. */
reb_integrator_whfasthelio_synchronize(r);
if (ri_whfasthelio->recalculate_heliocentric_but_not_synchronized_warning==0){
reb_warning(r,"Recalculating heliocentric coordinates but pos/vel were not synchronized before.");
ri_whfasthelio->recalculate_heliocentric_but_not_synchronized_warning++;
}
}
ri_whfasthelio->recalculate_heliocentric_this_timestep = 0;
to_helio_posvel(particles, ri_whfasthelio->p_h, N_real);
}
if (ri_whfasthelio->is_synchronized==1){
// First half DRIFT step
if (ri_whfasthelio->corrector){
reb_whfast_apply_corrector(r, 1.,ri_whfasthelio->corrector,reb_whfasthelio_corrector_Z);
}
reb_whfasthelio_keplerstep(r,r->dt/2.);
}else{
// Combined DRIFT step
reb_whfasthelio_keplerstep(r,r->dt);
}
// For force calculation:
if (r->force_is_velocity_dependent){
to_inertial_posvel(particles, ri_whfasthelio->p_h, N_real);
}else{
to_inertial_pos(particles, ri_whfasthelio->p_h, N_real);
}
r->t+=r->dt/2.;
}
/* Synchronize inertial pos/vel from the heliocentric state: finish the
 * pending half Kepler drift, apply the inverse corrector, and convert to
 * inertial coordinates.  With keep_unsynchronized set, the heliocentric
 * state is saved and restored afterwards so the ongoing integration is
 * unaffected (inertial output only). */
void reb_integrator_whfasthelio_synchronize(struct reb_simulation* const r){
struct reb_simulation_integrator_whfasthelio* const ri_whfasthelio = &(r->ri_whfasthelio);
if (ri_whfasthelio->is_synchronized==0){
const int N_real = r->N - r->N_var;
struct reb_particle* sync_ph = NULL;
if (ri_whfasthelio->keep_unsynchronized){
/* NOTE(review): malloc result unchecked; the copy is sized with r->N
 * while p_h was allocated for N_real.  These are equal here because
 * part1 rejects variational equations, but confirm before reuse. */
sync_ph = malloc(sizeof(struct reb_particle)*r->N);
memcpy(sync_ph,r->ri_whfasthelio.p_h,r->N*sizeof(struct reb_particle));
}
struct reb_particle* restrict const particles = r->particles;
reb_whfasthelio_keplerstep(r,r->dt/2.);
if (ri_whfasthelio->corrector){
/* Inverse corrector (-1) undoes the corrector applied in part1. */
reb_whfast_apply_corrector(r, -1.,ri_whfasthelio->corrector,reb_whfasthelio_corrector_Z);
}
to_inertial_posvel(particles, ri_whfasthelio->p_h, N_real);
if (ri_whfasthelio->keep_unsynchronized){
memcpy(r->ri_whfasthelio.p_h,sync_ph,r->N*sizeof(struct reb_particle));
free(sync_ph);
}else{
ri_whfasthelio->is_synchronized=1;
}
}
}
/* Second half of the WHFastHELIO timestep: interaction kick, jump step,
 * and (in safe mode) an immediate synchronization back to inertial
 * coordinates.  Advances the clock by the remaining dt/2 and records the
 * completed timestep size. */
void reb_integrator_whfasthelio_part2(struct reb_simulation* const r){
    struct reb_simulation_integrator_whfasthelio* const ri = &(r->ri_whfasthelio);
    reb_whfasthelio_interaction_step(r,r->dt);
    reb_whfasthelio_jump_step(r,r->dt);
    ri->is_synchronized=0;
    if (ri->safe_mode){
        reb_integrator_whfasthelio_synchronize(r);
    }
    r->t+=r->dt/2.;
    r->dt_last_done = r->dt;
}
/* Reset the WHFastHELIO integrator state to its defaults and release the
 * internal heliocentric coordinate buffer.  Safe to call repeatedly. */
void reb_integrator_whfasthelio_reset(struct reb_simulation* const r){
    struct reb_simulation_integrator_whfasthelio* const ri_whfasthelio = &(r->ri_whfasthelio);
    ri_whfasthelio->allocated_N = 0;
    ri_whfasthelio->safe_mode = 1;
    ri_whfasthelio->recalculate_heliocentric_this_timestep = 0;
    ri_whfasthelio->recalculate_heliocentric_but_not_synchronized_warning = 0;
    ri_whfasthelio->is_synchronized = 1;
    /* free(NULL) is a no-op, so the previous NULL guard was redundant.
     * Reset the pointer so a later part1 call reallocates cleanly. */
    free(ri_whfasthelio->p_h);
    ri_whfasthelio->p_h = NULL;
}
|
matrix.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M AAA TTTTT RRRR IIIII X X %
% MM MM A A T R R I X X %
% M M M AAAAA T RRRR I X %
% M M A A T R R I X X %
% M M A A T R R IIIII X X %
% %
% %
% MagickCore Matrix Methods %
% %
% Software Design %
% Cristy %
% August 2007 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image-private.h"
#include "MagickCore/matrix.h"
#include "MagickCore/matrix-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
/*
Typedef declaration.
*/
/*
  Private state for a rectangular matrix cache.  Elements live in heap
  memory (MemoryCache), a memory map (MapCache), or a plain disk file
  accessed via read/write (DiskCache) -- see AcquireMatrixInfo().
*/
struct _MatrixInfo
{
CacheType
type;  /* MemoryCache, MapCache, or DiskCache */
size_t
columns,  /* matrix width in elements */
rows,     /* matrix height in elements */
stride;   /* bytes per element */
MagickSizeType
length;  /* total bytes: columns*rows*stride */
MagickBooleanType
mapped,       /* elements obtained via MapBlob() rather than malloc */
synchronize;  /* MAGICK_SYNCHRONIZE env: preallocate disk extents */
char
path[MagickPathExtent];  /* backing file path (disk-based caches) */
int
file;  /* backing file descriptor, or -1 when memory-resident */
void
*elements;  /* element storage; NULL for a pure DiskCache */
SemaphoreInfo
*semaphore;  /* serializes lseek+read/write fallback I/O paths */
size_t
signature;  /* MagickCoreSignature validity marker */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMatrixInfo() allocates the ImageInfo structure.
%
% The format of the AcquireMatrixInfo method is:
%
% MatrixInfo *AcquireMatrixInfo(const size_t columns,const size_t rows,
% const size_t stride,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o columns: the matrix columns.
%
% o rows: the matrix rows.
%
% o stride: the matrix stride.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(SIGBUS)
/* SIGBUS handler installed by SetMatrixExtent(): touching a mapped page
   that cannot be materialized (e.g. the backing file cannot be extended)
   raises SIGBUS; convert that into a fatal cache exception rather than
   an unexplained crash. */
static void MatrixSignalHandler(int status)
{
magick_unreferenced(status);
ThrowFatalException(CacheFatalError,"UnableToExtendMatrixCache");
}
#endif
/*
  Write `length` bytes from `buffer` into the matrix backing file
  starting at byte `offset`.  Uses pwrite() when available; otherwise
  serializes lseek()+write() under the matrix semaphore (the file offset
  is shared state).  Short writes are continued and EINTR is retried.
  Returns the number of bytes written (callers compare against `length`),
  or -1 if the initial seek fails.
*/
static inline MagickOffsetType WriteMatrixElements(
const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
MagickOffsetType
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PWRITE)
LockSemaphoreInfo(matrix_info->semaphore);
if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
{
UnlockSemaphoreInfo(matrix_info->semaphore);
return((MagickOffsetType) -1);
}
#endif
count=0;
for (i=0; i < (MagickOffsetType) length; i+=count)
{
/* Chunk each request so it never exceeds what write() can express. */
#if !defined(MAGICKCORE_HAVE_PWRITE)
count=write(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) MAGICK_SSIZE_MAX));
#else
count=pwrite(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) MAGICK_SSIZE_MAX),(off_t) (offset+i));
#endif
if (count <= 0)
{
/* EINTR is retryable; any other failure ends the loop early. */
count=0;
if (errno != EINTR)
break;
}
}
#if !defined(MAGICKCORE_HAVE_PWRITE)
UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
return(i);
}
/*
  Extend the backing file to at least `length` bytes by writing a single
  byte at length-1 (sparse extension).  Optionally preallocates real disk
  blocks via posix_fallocate() when synchronize is set, and installs a
  SIGBUS handler so a later failed access to the mapped region aborts
  with a cache error.  Returns MagickTrue on success.
*/
static MagickBooleanType SetMatrixExtent(
MatrixInfo *magick_restrict matrix_info,MagickSizeType length)
{
MagickOffsetType
count,
extent,
offset;
/* Reject lengths that cannot be represented as a signed offset. */
if (length != (MagickSizeType) ((MagickOffsetType) length))
return(MagickFalse);
offset=(MagickOffsetType) lseek(matrix_info->file,0,SEEK_END);
if (offset < 0)
return(MagickFalse);
/* Already large enough -- nothing to do. */
if ((MagickSizeType) offset >= length)
return(MagickTrue);
extent=(MagickOffsetType) length-1;
count=WriteMatrixElements(matrix_info,extent,1,(const unsigned char *) "");
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
if (matrix_info->synchronize != MagickFalse)
(void) posix_fallocate(matrix_info->file,offset+1,extent-offset);
#endif
#if defined(SIGBUS)
(void) signal(SIGBUS,MatrixSignalHandler);
#endif
return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue);
}
/*
  Allocate a MatrixInfo, backed by heap memory, an anonymous memory map,
  or a disk file (in that order of preference) depending on resource
  limits.  Returns NULL on allocation failure; resource exhaustion is
  reported via `exception`.
*/
MagickExport MatrixInfo *AcquireMatrixInfo(const size_t columns,
  const size_t rows,const size_t stride,ExceptionInfo *exception)
{
  char
    *synchronize;

  MagickBooleanType
    status;

  MatrixInfo
    *matrix_info;

  matrix_info=(MatrixInfo *) AcquireMagickMemory(sizeof(*matrix_info));
  if (matrix_info == (MatrixInfo *) NULL)
    return((MatrixInfo *) NULL);
  (void) memset(matrix_info,0,sizeof(*matrix_info));
  matrix_info->signature=MagickCoreSignature;
  matrix_info->columns=columns;
  matrix_info->rows=rows;
  matrix_info->stride=stride;
  matrix_info->semaphore=AcquireSemaphoreInfo();
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      matrix_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  /*
    Validate the geometry: rows or stride of 0 would divide by zero in
    the round-trip check below (undefined behavior), and the
    multiplication must not overflow size_t.
  */
  matrix_info->length=(MagickSizeType) columns*rows*stride;
  if ((rows == 0) || (stride == 0) ||
      (matrix_info->columns != (size_t) (matrix_info->length/rows/stride)))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'","matrix cache");
      return(DestroyMatrixInfo(matrix_info));
    }
  matrix_info->type=MemoryCache;
  status=AcquireMagickResource(AreaResource,matrix_info->length);
  if ((status != MagickFalse) &&
      (matrix_info->length == (MagickSizeType) ((size_t) matrix_info->length)))
    {
      status=AcquireMagickResource(MemoryResource,matrix_info->length);
      if (status != MagickFalse)
        {
          /*
            Prefer heap memory; fall back to an anonymous memory map.
          */
          matrix_info->mapped=MagickFalse;
          matrix_info->elements=AcquireMagickMemory((size_t)
            matrix_info->length);
          if (matrix_info->elements == NULL)
            {
              matrix_info->mapped=MagickTrue;
              matrix_info->elements=MapBlob(-1,IOMode,0,(size_t)
                matrix_info->length);
            }
          if (matrix_info->elements == (unsigned short *) NULL)
            RelinquishMagickResource(MemoryResource,matrix_info->length);
        }
    }
  matrix_info->file=(-1);
  if (matrix_info->elements == (unsigned short *) NULL)
    {
      /*
        Memory limits exceeded: spill the matrix to a unique disk file,
        memory-mapping it when the map resource limit permits.
      */
      status=AcquireMagickResource(DiskResource,matrix_info->length);
      if (status == MagickFalse)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
            "CacheResourcesExhausted","`%s'","matrix cache");
          return(DestroyMatrixInfo(matrix_info));
        }
      matrix_info->type=DiskCache;
      matrix_info->file=AcquireUniqueFileResource(matrix_info->path);
      if (matrix_info->file == -1)
        return(DestroyMatrixInfo(matrix_info));
      status=AcquireMagickResource(MapResource,matrix_info->length);
      if (status != MagickFalse)
        {
          status=SetMatrixExtent(matrix_info,matrix_info->length);
          if (status != MagickFalse)
            matrix_info->elements=(void *) MapBlob(matrix_info->file,IOMode,0,
              (size_t) matrix_info->length);
          if (matrix_info->elements != NULL)
            matrix_info->type=MapCache;
          else
            RelinquishMagickResource(MapResource,matrix_info->length);
        }
    }
  return(matrix_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMagickMatrix() allocates and returns a matrix in the form of an
% array of pointers to an array of doubles, with all values pre-set to zero.
%
% This used to generate the two dimensional matrix, and vectors required
% for the GaussJordanElimination() method below, solving some system of
% simultanious equations.
%
% The format of the AcquireMagickMatrix method is:
%
% double **AcquireMagickMatrix(const size_t number_rows,
% const size_t size)
%
% A description of each parameter follows:
%
% o number_rows: the number pointers for the array of pointers
% (first dimension).
%
% o size: the size of the array of doubles each pointer points to
% (second dimension).
%
*/
/*
  Allocate a number_rows x size matrix as an array of row pointers, with
  every element initialized to 0.0.  Returns NULL if any allocation
  fails; partially-allocated rows are released before returning.
*/
MagickExport double **AcquireMagickMatrix(const size_t number_rows,
  const size_t size)
{
  double
    **rows;

  ssize_t
    r;

  rows=(double **) AcquireQuantumMemory(number_rows,sizeof(*rows));
  if (rows == (double **) NULL)
    return((double **) NULL);
  for (r=0; r < (ssize_t) number_rows; r++)
  {
    ssize_t
      c;

    rows[r]=(double *) AcquireQuantumMemory(size,sizeof(*rows[r]));
    if (rows[r] == (double *) NULL)
      {
        /* Allocation failed part-way: release rows already acquired. */
        while (--r >= 0)
          rows[r]=(double *) RelinquishMagickMemory(rows[r]);
        rows=(double **) RelinquishMagickMemory(rows);
        return((double **) NULL);
      }
    for (c=0; c < (ssize_t) size; c++)
      rows[r][c]=0.0;
  }
  return(rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyMatrixInfo() dereferences a matrix, deallocating memory associated
% with the matrix.
%
% The format of the DestroyImage method is:
%
% MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/*
  Release all storage associated with a matrix cache (memory, map, file)
  along with the resource-limit accounting acquired for it, then free the
  MatrixInfo itself.  Always returns NULL for convenient assignment.
*/
MagickExport MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
{
assert(matrix_info != (MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
LockSemaphoreInfo(matrix_info->semaphore);
switch (matrix_info->type)
{
case MemoryCache:
{
if (matrix_info->mapped == MagickFalse)
matrix_info->elements=RelinquishMagickMemory(matrix_info->elements);
else
{
(void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
matrix_info->elements=(unsigned short *) NULL;
}
RelinquishMagickResource(MemoryResource,matrix_info->length);
break;
}
case MapCache:
{
(void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
matrix_info->elements=NULL;
RelinquishMagickResource(MapResource,matrix_info->length);
}
/* fallthrough -- no break above: a map cache is backed by the disk
   file created in AcquireMatrixInfo(), which must still be closed and
   removed below.  NOTE(review): this looks intentional; confirm
   against upstream before adding a break. */
case DiskCache:
{
if (matrix_info->file != -1)
(void) close(matrix_info->file);
(void) RelinquishUniqueFileResource(matrix_info->path);
RelinquishMagickResource(DiskResource,matrix_info->length);
break;
}
default:
break;
}
UnlockSemaphoreInfo(matrix_info->semaphore);
RelinquishSemaphoreInfo(&matrix_info->semaphore);
return((MatrixInfo *) RelinquishMagickMemory(matrix_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G a u s s J o r d a n E l i m i n a t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussJordanElimination() returns a matrix in reduced row echelon form,
% while simultaneously reducing and thus solving the augumented results
% matrix.
%
% See also http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
% The format of the GaussJordanElimination method is:
%
% MagickBooleanType GaussJordanElimination(double **matrix,
% double **vectors,const size_t rank,const size_t number_vectors)
%
% A description of each parameter follows:
%
% o matrix: the matrix to be reduced, as an 'array of row pointers'.
%
% o vectors: the additional matrix argumenting the matrix for row reduction.
% Producing an 'array of column vectors'.
%
% o rank: The size of the matrix (both rows and columns).
% Also represents the number terms that need to be solved.
%
% o number_vectors: Number of vectors columns, argumenting the above matrix.
% Usally 1, but can be more for more complex equation solving.
%
% Note that the 'matrix' is given as a 'array of row pointers' of rank size.
% That is values can be assigned as matrix[row][column] where 'row' is
% typically the equation, and 'column' is the term of the equation.
% That is the matrix is in the form of a 'row first array'.
%
% However 'vectors' is a 'array of column pointers' which can have any number
% of columns, with each column array the same 'rank' size as 'matrix'.
%
% This allows for simpler handling of the results, especially is only one
% column 'vector' is all that is required to produce the desired solution.
%
% For example, the 'vectors' can consist of a pointer to a simple array of
% doubles. when only one set of simultanious equations is to be solved from
% the given set of coefficient weighted terms.
%
% double **matrix = AcquireMagickMatrix(8UL,8UL);
% double coefficents[8];
% ...
% GaussJordanElimination(matrix, &coefficents, 8UL, 1UL);
%
% However by specifing more 'columns' (as an 'array of vector columns',
% you can use this function to solve a set of 'separable' equations.
%
% For example a distortion function where u = U(x,y) v = V(x,y)
% And the functions U() and V() have separate coefficents, but are being
% generated from a common x,y->u,v data set.
%
% Another example is generation of a color gradient from a set of colors at
% specific coordients, such as a list x,y -> r,g,b,a.
%
% You can also use the 'vectors' to generate an inverse of the given 'matrix'
% though as a 'column first array' rather than a 'row first array'. For
% details see http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
*/
/*
  Reduce `matrix` (rank x rank, array of row pointers) to reduced row
  echelon form with full pivoting, simultaneously transforming the
  augmented `vectors` (number_vectors column arrays of length rank) into
  the solution(s).  Returns MagickFalse on allocation failure or when the
  matrix is singular; MagickTrue on success.  Fix over the original: the
  two early singularity returns previously leaked the pivot bookkeeping
  arrays.
*/
MagickPrivate MagickBooleanType GaussJordanElimination(double **matrix,
  double **vectors,const size_t rank,const size_t number_vectors)
{
/* In-place swap via arithmetic; no temporary needed. */
#define GaussJordanSwap(x,y) \
{ \
  if ((x) != (y)) \
    { \
      (x)+=(y); \
      (y)=(x)-(y); \
      (x)=(x)-(y); \
    } \
}

  double
    max,
    scale;

  ssize_t
    i,
    j,
    k;

  ssize_t
    column,
    *columns,
    *pivots,
    row,
    *rows;

  /*
    Allocate bookkeeping arrays for full pivoting.
  */
  columns=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*columns));
  rows=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*rows));
  pivots=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*pivots));
  if ((rows == (ssize_t *) NULL) || (columns == (ssize_t *) NULL) ||
      (pivots == (ssize_t *) NULL))
    {
      if (pivots != (ssize_t *) NULL)
        pivots=(ssize_t *) RelinquishMagickMemory(pivots);
      if (columns != (ssize_t *) NULL)
        columns=(ssize_t *) RelinquishMagickMemory(columns);
      if (rows != (ssize_t *) NULL)
        rows=(ssize_t *) RelinquishMagickMemory(rows);
      return(MagickFalse);
    }
  (void) memset(columns,0,rank*sizeof(*columns));
  (void) memset(rows,0,rank*sizeof(*rows));
  (void) memset(pivots,0,rank*sizeof(*pivots));
  column=0;
  row=0;
  for (i=0; i < (ssize_t) rank; i++)
  {
    /*
      Locate the largest remaining element as the next pivot.
    */
    max=0.0;
    for (j=0; j < (ssize_t) rank; j++)
      if (pivots[j] != 1)
        {
          for (k=0; k < (ssize_t) rank; k++)
            if (pivots[k] != 0)
              {
                if (pivots[k] > 1)
                  {
                    /*
                      Singular matrix: release work arrays before
                      failing (the original leaked them here).
                    */
                    pivots=(ssize_t *) RelinquishMagickMemory(pivots);
                    rows=(ssize_t *) RelinquishMagickMemory(rows);
                    columns=(ssize_t *) RelinquishMagickMemory(columns);
                    return(MagickFalse);
                  }
              }
            else
              if (fabs(matrix[j][k]) >= max)
                {
                  max=fabs(matrix[j][k]);
                  row=j;
                  column=k;
                }
        }
    pivots[column]++;
    if (row != column)
      {
        /*
          Move the pivot onto the diagonal.
        */
        for (k=0; k < (ssize_t) rank; k++)
          GaussJordanSwap(matrix[row][k],matrix[column][k]);
        for (k=0; k < (ssize_t) number_vectors; k++)
          GaussJordanSwap(vectors[k][row],vectors[k][column]);
      }
    rows[i]=row;
    columns[i]=column;
    if (matrix[column][column] == 0.0)
      {
        /*
          Singularity: release work arrays before failing (the original
          leaked them here).
        */
        pivots=(ssize_t *) RelinquishMagickMemory(pivots);
        rows=(ssize_t *) RelinquishMagickMemory(rows);
        columns=(ssize_t *) RelinquishMagickMemory(columns);
        return(MagickFalse);
      }
    /*
      Normalize the pivot row, then eliminate the pivot column from all
      other rows.
    */
    scale=PerceptibleReciprocal(matrix[column][column]);
    matrix[column][column]=1.0;
    for (j=0; j < (ssize_t) rank; j++)
      matrix[column][j]*=scale;
    for (j=0; j < (ssize_t) number_vectors; j++)
      vectors[j][column]*=scale;
    for (j=0; j < (ssize_t) rank; j++)
      if (j != column)
        {
          scale=matrix[j][column];
          matrix[j][column]=0.0;
          for (k=0; k < (ssize_t) rank; k++)
            matrix[j][k]-=scale*matrix[column][k];
          for (k=0; k < (ssize_t) number_vectors; k++)
            vectors[k][j]-=scale*vectors[k][column];
        }
  }
  /*
    Undo the column interchanges recorded above.
  */
  for (j=(ssize_t) rank-1; j >= 0; j--)
    if (columns[j] != rows[j])
      for (i=0; i < (ssize_t) rank; i++)
        GaussJordanSwap(matrix[i][rows[j]],matrix[i][columns[j]]);
  pivots=(ssize_t *) RelinquishMagickMemory(pivots);
  rows=(ssize_t *) RelinquishMagickMemory(rows);
  columns=(ssize_t *) RelinquishMagickMemory(columns);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x C o l u m n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixColumns() returns the number of columns in the matrix.
%
% The format of the GetMatrixColumns method is:
%
% size_t GetMatrixColumns(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/* Return the number of columns in the matrix. */
MagickExport size_t GetMatrixColumns(const MatrixInfo *matrix_info)
{
assert(matrix_info != (MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
return(matrix_info->columns);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixElement() returns the specifed element in the matrix.
%
% The format of the GetMatrixElement method is:
%
% MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix columns.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: return the matrix element in this buffer.
%
*/
/* Clamp a column offset into [0, columns-1] (replicate-edge policy). */
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x < 0L)
    return(0L);
  if (x < (ssize_t) columns)
    return(x);
  return((ssize_t) (columns-1));
}
/* Clamp a row offset into [0, rows-1] (replicate-edge policy). */
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y < 0L)
    return(0L);
  if (y < (ssize_t) rows)
    return(y);
  return((ssize_t) (rows-1));
}
/*
  Read `length` bytes into `buffer` from the matrix backing file starting
  at byte `offset`.  Uses pread() when available; otherwise serializes
  lseek()+read() under the matrix semaphore (the file offset is shared
  state).  Short reads are continued and EINTR is retried.  Returns the
  number of bytes read (callers compare against `length`), or -1 if the
  initial seek fails.
*/
static inline MagickOffsetType ReadMatrixElements(
const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
const MagickSizeType length,unsigned char *magick_restrict buffer)
{
MagickOffsetType
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PREAD)
LockSemaphoreInfo(matrix_info->semaphore);
if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
{
UnlockSemaphoreInfo(matrix_info->semaphore);
return((MagickOffsetType) -1);
}
#endif
count=0;
for (i=0; i < (MagickOffsetType) length; i+=count)
{
/* Chunk each request so it never exceeds what read() can express. */
#if !defined(MAGICKCORE_HAVE_PREAD)
count=read(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) MAGICK_SSIZE_MAX));
#else
count=pread(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) MAGICK_SSIZE_MAX),(off_t) (offset+i));
#endif
if (count <= 0)
{
/* EINTR is retryable; any other failure ends the loop early. */
count=0;
if (errno != EINTR)
break;
}
}
#if !defined(MAGICKCORE_HAVE_PREAD)
UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
return(i);
}
/*
  Copy one element of the matrix into `value` (stride bytes).  Out of
  range coordinates are clamped to the nearest edge.  Returns MagickTrue
  on success, MagickFalse if a disk-cache read comes up short.
*/
MagickExport MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,void *value)
{
  MagickOffsetType
    count,
    element;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  /* Clamp coordinates, then compute the linear element index. */
  element=(MagickOffsetType) EdgeY(y,matrix_info->rows)*matrix_info->columns+
    EdgeX(x,matrix_info->columns);
  if (matrix_info->type != DiskCache)
    {
      /* Memory or map cache: the element is directly addressable. */
      (void) memcpy(value,(unsigned char *) matrix_info->elements+element*
        matrix_info->stride,matrix_info->stride);
      return(MagickTrue);
    }
  count=ReadMatrixElements(matrix_info,element*matrix_info->stride,
    matrix_info->stride,(unsigned char *) value);
  return(count == (MagickOffsetType) matrix_info->stride ? MagickTrue :
    MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x R o w s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixRows() returns the number of rows in the matrix.
%
% The format of the GetMatrixRows method is:
%
% size_t GetMatrixRows(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/* Return the number of rows in the matrix. */
MagickExport size_t GetMatrixRows(const MatrixInfo *matrix_info)
{
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
return(matrix_info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ L e a s t S q u a r e s A d d T e r m s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LeastSquaresAddTerms() adds one set of terms and associate results to the
% given matrix and vectors for solving using least-squares function fitting.
%
% The format of the AcquireMagickMatrix method is:
%
% void LeastSquaresAddTerms(double **matrix,double **vectors,
% const double *terms,const double *results,const size_t rank,
% const size_t number_vectors);
%
% A description of each parameter follows:
%
% o matrix: the square matrix to add given terms/results to.
%
% o vectors: the result vectors to add terms/results to.
%
% o terms: the pre-calculated terms (without the unknown coefficent
% weights) that forms the equation being added.
%
% o results: the result(s) that should be generated from the given terms
% weighted by the yet-to-be-solved coefficents.
%
% o rank: the rank or size of the dimensions of the square matrix.
% Also the length of vectors, and number of terms being added.
%
% o number_vectors: Number of result vectors, and number or results being
% added. Also represents the number of separable systems of equations
% that is being solved.
%
% Example of use...
%
% 2 dimensional Affine Equations (which are separable)
% c0*x + c2*y + c4*1 => u
% c1*x + c3*y + c5*1 => v
%
% double **matrix = AcquireMagickMatrix(3UL,3UL);
% double **vectors = AcquireMagickMatrix(2UL,3UL);
% double terms[3], results[2];
% ...
% for each given x,y -> u,v
% terms[0] = x;
% terms[1] = y;
% terms[2] = 1;
% results[0] = u;
% results[1] = v;
% LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL);
% ...
% if ( GaussJordanElimination(matrix,vectors,3UL,2UL) ) {
% c0 = vectors[0][0];
% c2 = vectors[0][1];
% c4 = vectors[0][2];
% c1 = vectors[1][0];
% c3 = vectors[1][1];
% c5 = vectors[1][2];
% }
% else
% printf("Matrix unsolvable\n");
% RelinquishMagickMatrix(matrix,3UL);
% RelinquishMagickMatrix(vectors,2UL);
%
*/
/*
  Accumulate one observation into the normal equations for least-squares
  fitting: matrix gains the outer product of `terms` with itself, and
  each result vector gains its result weighted by `terms`.
*/
MagickPrivate void LeastSquaresAddTerms(double **matrix,double **vectors,
  const double *terms,const double *results,const size_t rank,
  const size_t number_vectors)
{
  ssize_t
    u,
    v;

  for (v=0; v < (ssize_t) rank; v++)
  {
    /* Outer-product column v of the normal matrix. */
    for (u=0; u < (ssize_t) rank; u++)
      matrix[u][v]+=terms[u]*terms[v];
    /* Corresponding right-hand-side contributions. */
    for (u=0; u < (ssize_t) number_vectors; u++)
      vectors[u][v]+=results[u]*terms[v];
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a t r i x T o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MatrixToImage() returns a matrix as an image. The matrix elements must be
% of type double otherwise nonsense is returned.
%
% The format of the MatrixToImage method is:
%
% Image *MatrixToImage(const MatrixInfo *matrix_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Render the matrix as a grayscale image: scan for the min/max element,
  linearly rescale each element into the quantum range, and write one
  pixel per element.  Elements are read as doubles, so stride must be at
  least sizeof(double); otherwise NULL is returned.
*/
MagickExport Image *MatrixToImage(const MatrixInfo *matrix_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
double
max_value,
min_value,
scale_factor;
Image
*image;
MagickBooleanType
status;
ssize_t
y;
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (matrix_info->stride < sizeof(double))
return((Image *) NULL);
/*
Determine range of matrix.
*/
(void) GetMatrixElement(matrix_info,0,0,&min_value);
max_value=min_value;
for (y=0; y < (ssize_t) matrix_info->rows; y++)
{
ssize_t
x;
for (x=0; x < (ssize_t) matrix_info->columns; x++)
{
double
value;
/* Unreadable elements are simply skipped during the range scan. */
if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
continue;
if (value < min_value)
min_value=value;
else
if (value > max_value)
max_value=value;
}
}
/* Choose a scale that maps [min,max] onto the quantum range; the two
degenerate cases (all zero, all equal) are handled separately. */
if ((min_value == 0.0) && (max_value == 0.0))
scale_factor=0;
else
if (min_value == max_value)
{
scale_factor=(double) QuantumRange/min_value;
min_value=0;
}
else
scale_factor=(double) QuantumRange/(max_value-min_value);
/*
Convert matrix to image.
*/
/* NOTE(review): the AcquireImage result is used without a NULL check
-- confirm the project's allocation-failure policy here. */
image=AcquireImage((ImageInfo *) NULL,exception);
image->columns=matrix_info->columns;
image->rows=matrix_info->rows;
image->colorspace=GRAYColorspace;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
value;
Quantum
*q;
ssize_t
x;
/* A failed row elsewhere aborts remaining rows cheaply. */
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
continue;
/* Rescale the element into the quantum range and store it. */
value=scale_factor*(value-min_value);
*q=ClampToQuantum(value);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N u l l M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NullMatrix() sets all elements of the matrix to zero.
%
% The format of the memset method is:
%
% MagickBooleanType *NullMatrix(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/*
  Set every element of the matrix to zero.  Memory- and map-backed
  caches are cleared with memset(); a disk cache is overwritten byte by
  byte.  Returns MagickTrue on success, MagickFalse if any write fails.
*/
MagickExport MagickBooleanType NullMatrix(MatrixInfo *matrix_info)
{
  ssize_t
    x;

  ssize_t
    count,
    row_length,
    y;

  unsigned char
    value;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  if (matrix_info->type != DiskCache)
    {
      (void) memset(matrix_info->elements,0,(size_t)
        matrix_info->length);
      return(MagickTrue);
    }
  /*
    Each row occupies columns*stride bytes, so rows x row_length covers
    exactly matrix_info->length bytes.  (The previous code iterated the
    inner loop to matrix_info->length per row, writing rows times too
    many bytes and growing the backing file.)
  */
  value=0;
  row_length=(ssize_t) (matrix_info->columns*matrix_info->stride);
  (void) lseek(matrix_info->file,0,SEEK_SET);
  for (y=0; y < (ssize_t) matrix_info->rows; y++)
  {
    for (x=0; x < row_length; x++)
    {
      count=write(matrix_info->file,&value,sizeof(value));
      if (count != (ssize_t) sizeof(value))
        break;
    }
    if (x < row_length)
      break;
  }
  return(y < (ssize_t) matrix_info->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e l i n q u i s h M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RelinquishMagickMatrix() frees the previously acquired matrix (array of
% pointers to arrays of doubles).
%
% The format of the RelinquishMagickMatrix method is:
%
% double **RelinquishMagickMatrix(double **matrix,
% const size_t number_rows)
%
% A description of each parameter follows:
%
% o matrix: the matrix to relinquish
%
% o number_rows: the first dimension of the acquired matrix (number of
% pointers)
%
*/
MagickExport double **RelinquishMagickMatrix(double **matrix,
  const size_t number_rows)
{
  /*
    Free each row of the matrix, then the array of row pointers itself.
    A NULL matrix is a no-op; always returns NULL-or-the-freed pointer
    so callers can write `matrix=RelinquishMagickMatrix(matrix,rows);'.
  */
  ssize_t
    r;

  if (matrix == (double **) NULL)
    return(matrix);
  for (r=0; r < (ssize_t) number_rows; r++)
    matrix[r]=(double *) RelinquishMagickMemory(matrix[r]);
  return((double **) RelinquishMagickMemory(matrix));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetMatrixElement() sets the specified element in the matrix.
%
% The format of the SetMatrixElement method is:
%
% MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix columns.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: set the matrix element to this value.
%
*/
MagickExport MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,const void *value)
{
  /*
    Store one element (stride bytes) at (x,y), either into the in-memory
    element buffer or through the disk cache.  Returns MagickFalse when the
    offset falls outside the matrix or the disk write comes up short.
  */
  MagickOffsetType
    offset,
    written;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  offset=(MagickOffsetType) y*matrix_info->columns+x;
  if ((offset < 0) ||
      ((MagickSizeType) (offset*matrix_info->stride) >= matrix_info->length))
    return(MagickFalse);
  if (matrix_info->type != DiskCache)
    {
      (void) memcpy((unsigned char *) matrix_info->elements+offset*
        matrix_info->stride,value,matrix_info->stride);
      return(MagickTrue);
    }
  written=WriteMatrixElements(matrix_info,offset*matrix_info->stride,
    matrix_info->stride,(unsigned char *) value);
  return(written == (MagickOffsetType) matrix_info->stride ? MagickTrue :
    MagickFalse);
}
|
ntlmv1_mschapv2_fmt_plug.c | /*
* Previous files MSCHAPv2_fmt_plug.c and NETNTLM_fmt_plug.c now merged into
* this one file, sharing functions.
*
* NETNTLM_fmt.c -- NTLM Challenge/Response
* Written by JoMo-Kun <jmk at foofus.net> in 2007
* and placed in the public domain.
*
* This algorithm is designed for performing brute-force cracking of the NTLM
* (version 1) challenge/response pairs exchanged during network-based
* authentication attempts [1]. The captured challenge/response pairs from these
* attempts should be stored using the L0phtCrack 2.0 LC format, specifically:
* username:unused:unused:lm response:ntlm response:challenge. For example:
*
* CORP\Administrator:::25B2B477CE101D83648BB087CE7A1C217F51C7FC64C0EBB1:
* C8BD0C1630A9ECF7A95F494A8F0B2CB4A3F25B1225514304:1122334455667788
*
* It should be noted that a NTLM authentication response is not same as a NTLM
* password hash, which can be extracted using tools such as FgDump [2]. NTLM
* responses can be gathered via normal network capture or via tools which
* perform layer 2 attacks, such as Ettercap [3] and Cain [4]. The responses can
* also be harvested using a modified Samba service [5] in conjunction with
* some trickery to convince the user to connect to it. I leave what that
* trickery may actually be as an exercise for the reader (HINT: Karma, NMB
* broadcasts, IE, Outlook, social engineering, ...).
*
* [1] http://davenport.sourceforge.net/ntlm.html#theNtlmResponse
* [2] http://www.foofus.net/~fizzgig/fgdump/
* [3] http://ettercap.sourceforge.net/
* [4] http://www.oxid.it/cain.html
* [5] http://www.foofus.net/jmk/smbchallenge.html
*
* This version supports Extended Session Security. This is what
* is used when the "LM" hash ends in 32 zeros:
*
* DOMAIN\User:::c70e4fb229437ef300000000000000000000000000000000:
* abf7762caf2b1bbfc5cfc1f46665249f049e0af72ae5b5a9:24ca92fdab441aa4
*
* MSCHAPv2_fmt.c -- Microsoft PPP CHAP Extensions, Version 2
* Written by JoMo-Kun <jmk at foofus.net> in 2010
* and placed in the public domain.
*
* Support for freeradius-wep-patch challenge/response format
* added by Linus Lüssing in 2012 and is licensed under CC0/PD terms:
* To the extent possible under law, Linus Lüssing has waived all copyright
* and related or neighboring rights to this work. This work is published from:
* Germany.
*
*
* This algorithm is designed for performing brute-force cracking of the
* MSCHAPv2 challenge/response sets exchanged during network-based
* authentication attempts. The captured challenge/response set from these
* attempts should be stored using the following format:
*
* USERNAME:::AUTHENTICATOR CHALLENGE:MSCHAPv2 RESPONSE:PEER CHALLENGE
* USERNAME::DOMAIN:AUTHENTICATOR CHALLENGE:MSCHAPv2 RESPONSE:PEER CHALLENGE
* DOMAIN\USERNAME:::AUTHENTICATOR CHALLENGE:MSCHAPv2 RESPONSE:PEER CHALLENGE
* :::MSCHAPv2 CHALLENGE:MSCHAPv2 RESPONSE:
*
* For example:
* User:::5B5D7C7D7B3F2F3E3C2C602132262628:82309ECD8D708B5EA08FAA3981CD83544233114A3D85D6DF:21402324255E262A28295F2B3A337C7E
* domain\fred:::56d64cbe7bad61349a0b752335100eaf:d7d829d9545cef1d631b4e568ffb7586050fa3a4d02dbc0b:7f8a466cff2a6bf0c80218bbf56d76bc
*
* http://freeradius.org/rfc/rfc2759.txt
*
* Modified for performance and support for SSE2, NTLMv1 ESS, OMP and UTF-8, by
* magnum 2010-2011 and 2013.
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_MSCHAPv2_new;
extern struct fmt_main fmt_NETNTLM_new;
#elif FMT_REGISTERS_H
john_register_one(&fmt_MSCHAPv2_new);
john_register_one(&fmt_NETNTLM_new);
#else
#include <string.h>
#include <openssl/des.h>
#include "arch.h"
#include "simd-intrinsics.h"
#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD4)
#else
#ifdef _OPENMP
#ifndef OMP_SCALE
#define OMP_SCALE 4
#endif
#include <omp.h>
#endif
#endif
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "memory.h"
#include "johnswap.h"
#include "sha.h"
#include "md4.h"
#include "md5.h"
#include "unicode.h"
#include "john.h"
#include "memdbg.h"
extern volatile int bench_running;
#ifndef uchar
#define uchar unsigned char
#endif
#define CHAP_FORMAT_LABEL "MSCHAPv2"
#define CHAP_FORMAT_NAME "C/R"
#define FORMAT_TAG "$MSCHAPv2$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define FORMAT_TAGN "$NETNTLM$"
#define FORMAT_TAGN_LEN (sizeof(FORMAT_TAGN)-1)
#define CHAP_USERNAME_LENGTH 256
#define CHAP_CHALLENGE_LENGTH 64
#define CHAP_TOTAL_LENGTH 13 + CHAP_USERNAME_LENGTH + CHAP_CHALLENGE_LENGTH + CIPHERTEXT_LENGTH
#define NTLM_FORMAT_LABEL "netntlm"
#define NTLM_FORMAT_NAME "NTLMv1 C/R"
#define NTLM_TOTAL_LENGTH (10 + 2 * 2 * SALT_SIZE + CIPHERTEXT_LENGTH)
#define ALGORITHM_NAME "MD4 DES (ESS MD5) " MD4_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1000
#define FULL_BINARY_SIZE (2 + 8 * 3)
#define BINARY_SIZE (2 + 8)
#define BINARY_ALIGN 2
#define SALT_SIZE 8
#define SALT_ALIGN MEM_ALIGN_WORD
#define CIPHERTEXT_LENGTH 48
#ifdef SIMD_COEF_32
#define PLAINTEXT_LENGTH 27
//#define SSE_OMP
#if defined (_OPENMP) && defined(SSE_OMP)
#define BLOCK_LOOPS (2048 / NBKEYS)
#else
#define BLOCK_LOOPS (1024 / NBKEYS)
#endif
#define MIN_KEYS_PER_CRYPT (NBKEYS * BLOCK_LOOPS)
#define MAX_KEYS_PER_CRYPT (NBKEYS * BLOCK_LOOPS)
// These 2 get the proper uint32_t limb from the SIMD mixed set. They both
// work properly for both BE and LE machines :) These SHOULD be used whenever
// the full uint32_t item is wanted, usually RHS of an assignment to uint32_t*
// NOTE, i is number is based on uint32_t[] and not uint8_t[] offsets.
#define GETOUTPOS_W32(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i<<2)&(0xffffffff-3))*SIMD_COEF_32 + (unsigned int)index/SIMD_COEF_32*4*SIMD_COEF_32*4 )
#define GETPOS_W32(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i<<2)&(0xffffffff-3))*SIMD_COEF_32 + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32*4 )
// GETPOS HAS to be BE/LE specific
#if ARCH_LITTLE_ENDIAN==1
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32*4 )
#else
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32*4 )
#endif
#else
#define PLAINTEXT_LENGTH 64
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 2048
#endif
#ifdef SIMD_COEF_32
/* Interleaved multi-candidate key buffer (UTF-16, MD4 SIMD layout). */
static unsigned char *saved_key;
#else
/* One UTF-16 plaintext per candidate, plus its length in bytes. */
static UTF16 (*saved_key)[PLAINTEXT_LENGTH + 1];
static int (*saved_len);
#endif
/* Per-candidate 16-bit partial result — presumably from crypt_all(); not shown here. */
static unsigned short (*crypt_key);
/* Per-candidate NT hash (MD4) buffer, 16 bytes each. */
static unsigned char *nthash;
/* 64Kbit bitmap (0x10000/8 bytes, see init()) for fast negative lookups. */
static uint32_t *bitmap;
static int cmps_per_crypt, use_bitmap;
/* Third-DES-key byte pair found by the last successful valid(); retried
   first on the next call to skip the 256x256 search. */
static int valid_i, valid_j;
/* Active 8-byte challenge installed by set_salt(). */
static uchar *challenge;
static int keys_prepared;
static struct fmt_main *my;
static char *chap_long_to_short(char *orig); /* used to canonicalize the MSCHAPv2 format */
static struct fmt_tests chap_tests[] = {
{"$MSCHAPv2$4c092fd3fd98236502e8591100046326$b912ce522524d33123a982cf330a57f8e953fa7974042b5d$6a4915d0ce61d42be533640a75391925$1111", "2222"},
{"$MSCHAPv2$5B5D7C7D7B3F2F3E3C2C602132262628$82309ECD8D708B5EA08FAA3981CD83544233114A3D85D6DF$21402324255E262A28295F2B3A337C7E$User", "clientPass"},
{"$MSCHAPv2$d07054459a1fdbc266a006f0220e6fac$33c8331a9b03b7e003f09dd253d740a2bead544143cc8bde$3545cb1d89b507a5de104435e81b14a4$testuser1", "Cricket8"},
{"$MSCHAPv2$56d64cbe7bad61349a0b752335100eaf$d7d829d9545cef1d631b4e568ffb7586050fa3a4d02dbc0b$7f8a466cff2a6bf0c80218bbf56d76bc$fred", "OMG!BBQ!11!one"}, /* domain\fred */
#if PLAINTEXT_LENGTH >= 35
{"$MSCHAPv2$b3c42db475b881d3c52ff3923d7b3bf8$f07c7a4eb391f5debe32d814679a5a69661b86b33227c4f8$6321f8649b971bd11ce8d5cb22a4a738$bOb", "asdblahblahblahblahblahblahblahblah"}, /* WorkGroup\bOb */
#endif
{"$MSCHAPv2$d94e7c7972b2376b28c268583e162de7$eba25a3b04d2c7085d01f842e2befc91745c40db0f792356$0677ca7318fd7f65ae1b4f58c9f4f400$lameuser", ""}, /* no password */
{"$MSCHAPv2$8710da60ebfc4cab$c4e3bb55904c966927ee68e5f1472e1f5d8ec165713b5360$$foo4", "bar4" },
{"$MSCHAPv2$8710da60ebfc4cab$c4e3bb55904c966927ee68e5f1472e1f5d8ec165713b5360$$", "bar4" },
/* Ettercap generated three test vectors */
{"$MSCHAPv2$3D79CC8CDC0261D4$B700770725F87739ADB110B310D9A289CDBB550ADCA6CB86$solar", "solarisalwaysbusy"},
{"$MSCHAPv2$BA75EB14EFBFBF25$ED8CC90FD40FAA2D6BCD0ABD0B1F562FD777DF6C5609C98B$lulu", "password"},
{"$MSCHAPv2$95A87FA62EBCD2E3C8B09E1B448A6C72$ED8CC90FD40FAA2D6BCD0ABD0B1F562FD777DF6C5609C98B$E2AE0995EAAC6CEFF0D9757428B51509$lulu", "password"},
/* Single test vector from chapcrack's sample pcap file */
{"$MSCHAPv2$6D0E1C056CD94D5F$1C93ABCE815400686BAECA315F348469256420598A73AD49$moxie", "bPCFyF2uL1p5Lg5yrKmqmY"},
{"", "clientPass", {"User", "", "", "5B5D7C7D7B3F2F3E3C2C602132262628", "82309ECD8D708B5EA08FAA3981CD83544233114A3D85D6DF", "21402324255E262A28295F2B3A337C7E"} },
{"", "Cricket8", {"testuser1", "", "", "d07054459a1fdbc266a006f0220e6fac", "33c8331a9b03b7e003f09dd253d740a2bead544143cc8bde", "3545cb1d89b507a5de104435e81b14a4"} },
{"", "OMG!BBQ!11!one", {"domain\\fred", "", "", "56d64cbe7bad61349a0b752335100eaf", "d7d829d9545cef1d631b4e568ffb7586050fa3a4d02dbc0b", "7f8a466cff2a6bf0c80218bbf56d76bc"} }, /* domain\fred */
{"", "", {"lameuser", "", "domain", "d94e7c7972b2376b28c268583e162de7", "eba25a3b04d2c7085d01f842e2befc91745c40db0f792356", "0677ca7318fd7f65ae1b4f58c9f4f400"} }, /* no password */
{NULL}
};
static struct fmt_tests ntlm_tests[] = {
{"$NETNTLM$1122334455667788$BFCCAF26128EC95F9999C9792F49434267A1D9B0EF89BFFB", "g3rg3g3rg3g3rg3"},
#ifndef SIMD_COEF_32 /* exceeds max length for SSE */
{"$NETNTLM$1122334455667788$E463FAA5D868ECE20CAE622474A2F440A652D642156AF863", "M1xedC4se%^&*@)##(blahblah!@#"},
#endif
{"$NETNTLM$c75c20bff9baa71f4765f360625700b0$81f5ecd8a77fe819f7f6689a08a27ac705fc2e1bb00cecb2", "password"},
{"$NETNTLM$1122334455667788$35B62750E1B9B3205C50D6BA351092C12A1B9B3CDC65D44A", "FooBarGerg"},
{"$NETNTLM$1122334455667788$A4765EBFE83D345A7CB1660B8899251905164029F8086DDE", "visit www.foofus.net"},
{"$NETNTLM$24ca92fdab441aa4c70e4fb229437ef3$abf7762caf2b1bbfc5cfc1f46665249f049e0af72ae5b5a9", "longpassword"},
{"$NETNTLM$1122334455667788$B2B2220790F40C88BCFF347C652F67A7C4A70D3BEBD70233", "cory21"},
{"", "g3rg3g3rg3g3rg3", {"User", "", "", "lm-hash", "BFCCAF26128EC95F9999C9792F49434267A1D9B0EF89BFFB", "1122334455667788"} },
{"", "FooBarGerg", {"User", "", "", "lm-hash", "35B62750E1B9B3205C50D6BA351092C12A1B9B3CDC65D44A", "1122334455667788"} },
{"", "visit www.foofus.net", {"User", "", "", "lm-hash", "A4765EBFE83D345A7CB1660B8899251905164029F8086DDE", "1122334455667788"} },
{"", "password", {"ESS", "", "", "4765f360625700b000000000000000000000000000000000", "81f5ecd8a77fe819f7f6689a08a27ac705fc2e1bb00cecb2", "c75c20bff9baa71f"} },
{"", "cory21", {"User", "", "", "lm-hash", "B2B2220790F40C88BCFF347C652F67A7C4A70D3BEBD70233", "1122334455667788"} },
{NULL}
};
/*
 * Expand a 56-bit (7-byte) key into the 8-byte DES key layout: each
 * output byte carries 7 key bits in its high bits.  DES ignores the
 * low (parity) bit, so it is simply left as whatever shifts in.
 */
inline static void setup_des_key(uchar key_56[], DES_key_schedule *ks)
{
	DES_cblock key;
	int i;

	key[0] = key_56[0];
	for (i = 1; i < 7; i++)
		key[i] = (key_56[i - 1] << (8 - i)) | (key_56[i] >> i);
	key[7] = key_56[6] << 1;
	DES_set_key(&key, ks);
}
/*
 * Validate the long (4-field) MSCHAPv2 form:
 *   $MSCHAPv2$<auth challenge, 32 hex>$<response, 48 hex>$<peer challenge, 32 hex>$<username>
 * Returns 1 if well-formed, 0 otherwise.  Each hex field is scanned up to
 * its '$' terminator; a NUL or any non-hex character fails the atoi16
 * test (atoi16 maps non-hex bytes to 0x7F) and rejects the hash.
 */
static int chap_valid_long(char *ciphertext)
{
	char *pos, *pos2;

	if (ciphertext == NULL) return 0;
	else if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)!=0) return 0;
	if (strlen(ciphertext) > CHAP_TOTAL_LENGTH)
		return 0;
	/* Validate Authenticator/Server Challenge Length */
	pos = &ciphertext[FORMAT_TAG_LEN];
	for (pos2 = pos; *pos2 != '$'; pos2++)
		if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
			return 0;
	/* must be exactly 32 hex digits before the '$' */
	if ( !(*pos2 && (pos2 - pos == CHAP_CHALLENGE_LENGTH / 2)) )
		return 0;
	/* Validate MSCHAPv2 Response Length */
	pos2++; pos = pos2;
	for (; *pos2 != '$'; pos2++)
		if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
			return 0;
	if ( !(*pos2 && (pos2 - pos == CIPHERTEXT_LENGTH)) )
		return 0;
	/* Validate Peer/Client Challenge Length */
	pos2++; pos = pos2;
	for (; *pos2 != '$'; pos2++)
		if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
			return 0;
	if ( !(*pos2 && (pos2 - pos == CHAP_CHALLENGE_LENGTH / 2)) )
		return 0;
	/* Validate Username Length */
	if (strlen(++pos2) > CHAP_USERNAME_LENGTH)
		return 0;
	return 1;
}
/*
 * Validate the short (canonical) MSCHAPv2 form:
 *   $MSCHAPv2$<challenge, 16 hex>$<response, 48 hex>$...
 * Returns 1 if well-formed, 0 otherwise.
 */
static int chap_valid_short(char *ciphertext)
{
	char *p;
	int n;

	if (ciphertext == NULL)
		return 0;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	if (strlen(ciphertext) > CHAP_TOTAL_LENGTH)
		return 0;
	/* Challenge: exactly 16 hex digits terminated by '$'.  A NUL (or any
	   other non-hex byte) fails the atoi16 test and rejects the hash. */
	p = ciphertext + FORMAT_TAG_LEN;
	n = 0;
	while (p[n] != '$') {
		if (atoi16[ARCH_INDEX(p[n])] == 0x7F)
			return 0;
		n++;
	}
	if (n != CHAP_CHALLENGE_LENGTH / 4)
		return 0;
	/* Response: exactly 48 hex digits terminated by '$'. */
	p += n + 1;
	n = 0;
	while (p[n] != '$') {
		if (atoi16[ARCH_INDEX(p[n])] == 0x7F)
			return 0;
		n++;
	}
	if (n != CIPHERTEXT_LENGTH)
		return 0;
	return 1;
}
/*
 * Decode the 16 hex digits that follow the $MSCHAPv2$ tag into the
 * 8-byte binary salt (the challenge).
 */
static void chap_get_challenge(const char *ciphertext,
                               unsigned char *binary_salt)
{
	const char *hex = ciphertext + FORMAT_TAG_LEN;
	int n;

	for (n = 0; n < SALT_SIZE; n++)
		binary_salt[n] = (atoi16[ARCH_INDEX(hex[n * 2])] << 4) +
			atoi16[ARCH_INDEX(hex[n * 2 + 1])];
}
/* Either the ciphertext already contains the MSCHAPv2 Challenge (8 bytes) or
we are going to calculate it via:
sha1(|Peer/Client Challenge (8 Bytes)|Authenticator/Server Challenge (8 Bytes)|Username (<=256)|)
NOTE, we now ONLY call this function with the short form. The long form gets converted into the short
form in either the prepare or split function. The short form is the canonical form (Change made July, 2014, JimF)
*/
/*
 * Return the 8-byte binary salt (the MSCHAPv2 challenge) for a hash in
 * the short canonical form.  Long-form hashes are converted to short
 * form in prepare()/split() before this is ever called.
 *
 * Change: removed the local digest[20] buffer — it was declared and
 * zeroed but never read or written by anything else (dead code left
 * over from when the SHA-1 challenge derivation lived here; that now
 * happens in chap_long_to_short()).
 */
static void *chap_get_salt(char *ciphertext)
{
	static unsigned char *binary_salt;

	if (!binary_salt)
		binary_salt = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD);
	/* This is just to silence scan-build. It will never happen.
	   It is unclear why only this format gave warnings, many others do
	   similar things. */
	if (!ciphertext)
		return ciphertext;
	memset(binary_salt, 0, SALT_SIZE);
	chap_get_challenge(ciphertext, binary_salt);
	return (void*)binary_salt;
}
/*
 * This function will convert long hashes, into short ones (the short is now the canonical format)
 * converts
 * $MSCHAPv2$95a87fa62ebcd2e3c8b09e1b448a6c72$ed8cc90fd40faa2d6bcd0abd0b1f562fd777df6c5609c98b$e2ae0995eaac6ceff0d9757428b51509$lulu
 * into
 * $MSCHAPv2$ba75eb14efbfbf25$ed8cc90fd40faa2d6bcd0abd0b1f562fd777df6c5609c98b$$
 *
 * The 8-byte short challenge is SHA1(peer challenge | authenticator
 * challenge | username), truncated to SALT_SIZE bytes (RFC 2759
 * ChallengeHash).  This code was moved from get_salt().
 */
static char *chap_long_to_short(char *ciphertext) {
	static char Buf[CHAP_TOTAL_LENGTH+1]; // larger than we need, but not a big deal
	static SHA_CTX ctx;
	unsigned char tmp[16];
	unsigned char digest[20];
	char *pos = NULL;
	int i;

	SHA1_Init(&ctx);
	/* Peer Challenge */
	pos = ciphertext + FORMAT_TAG_LEN + 16*2 + 1 + 24*2 + 1; /* Skip $MSCHAPv2$, Authenticator Challenge and Response Hash */
	memset(tmp, 0, 16);
	for (i = 0; i < 16; i++)
		tmp[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])];
	SHA1_Update(&ctx, tmp, 16);
	/* Authenticator Challenge */
	pos = ciphertext + FORMAT_TAG_LEN; /* Skip $MSCHAPv2$ */
	memset(tmp, 0, 16);
	for (i = 0; i < 16; i++)
		tmp[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])];
	SHA1_Update(&ctx, tmp, 16);
	/* Username - Only the user name (as presented by the peer and
	   excluding any prepended domain name) is used as input to SHAUpdate()
	*/
	pos = ciphertext + FORMAT_TAG_LEN + 16*2 + 1 + 24*2 + 1 + 16*2 + 1; /* Skip $MSCHAPv2$, Authenticator, Response and Peer */
	SHA1_Update(&ctx, pos, strlen(pos));
	SHA1_Final(digest, &ctx);
	// Ok, now we re-make our ciphertext buffer, into the short canonical form.
	strcpy(Buf, FORMAT_TAG);
	pos = Buf + FORMAT_TAG_LEN;
	/* 16 hex digits of the derived challenge */
	for (i = 0; i < SALT_SIZE; i++) {
		//binary_salt.u8[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])];
		pos[(i<<1)] = itoa16[digest[i]>>4];
		pos[(i<<1)+1] = itoa16[digest[i]&0xF];
	}
	/* ciphertext[42] is the '$' before the response (10 tag + 32 hex);
	   CIPHERTEXT_LENGTH+2 copies "$<48-hex response>$", then we append
	   the final '$' and NUL to form "...$response$$". */
	memcpy(&pos[16], &ciphertext[42], CIPHERTEXT_LENGTH+2);
	pos[16+CIPHERTEXT_LENGTH+2] = '$';
	pos[16+CIPHERTEXT_LENGTH+3] = 0;
	//printf("short=%s original=%s\n", Buf, ciphertext);
	return Buf;
}
/*
 * Full validation of an MSCHAPv2 hash (short or long form).  Beyond the
 * syntactic checks, this verifies the third 8-byte response block: the
 * third DES key holds only the last 2 bytes of the NT hash (padded with
 * zeros), so we can brute-force all 2^16 possibilities and reject hashes
 * whose third block matches none of them.  The last successful byte pair
 * is cached in the globals valid_i/valid_j and tried first.
 */
static int chap_valid(char *ciphertext, struct fmt_main *pFmt)
{
	char *cp = NULL;

	if (chap_valid_short(ciphertext))
		cp = ciphertext + FORMAT_TAG_LEN + CHAP_CHALLENGE_LENGTH / 4 + 1;
	else if (chap_valid_long(ciphertext))
		cp = ciphertext + FORMAT_TAG_LEN + CHAP_CHALLENGE_LENGTH / 2 + 1;
	if (cp) {
		uchar key[7] = {0, 0, 0, 0, 0, 0, 0};
		DES_key_schedule ks;
		DES_cblock b3cmp;
		uchar binary[8];
		DES_cblock *challenge = chap_get_salt(ciphertext);
		int i, j;

		/* skip the first two 8-byte response blocks (32 hex digits) */
		cp += 2 * 8 * 2;
		for (i = 0; i < 8; i++) {
			binary[i] = atoi16[ARCH_INDEX(cp[i * 2])] << 4;
			binary[i] |= atoi16[ARCH_INDEX(cp[i * 2 + 1])];
		}
		/* try the cached byte pair first */
		key[0] = valid_i; key[1] = valid_j;
		setup_des_key(key, &ks);
		DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
		if (!memcmp(binary, &b3cmp, 8))
			return 1;
		/* exhaustive 2^16 search over the two significant key bytes */
		for (i = 0; i < 0x100; i++)
		for (j = 0; j < 0x100; j++) {
			key[0] = i; key[1] = j;
			setup_des_key(key, &ks);
			DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
			if (!memcmp(binary, &b3cmp, 8)) {
				valid_i = i;
				valid_j = j;
				return 1;
			}
		}
#ifdef DEBUG
		if (!bench_running)
			fprintf(stderr, "Rejected MSCHAPv2 hash with "
			        "invalid 3rd block\n");
#endif
	}
	return 0;
}
/*
 * Build a long-form $MSCHAPv2$ hash from the login fields:
 * field 3 = authenticator challenge, 4 = response, 5 = peer challenge,
 * field 0 = username (any DOMAIN\ prefix is stripped).  Returns the
 * constructed hash if it validates, otherwise the raw hash field.
 */
static char *chap_prepare_long(char *split_fields[10])
{
	char *user, *candidate, *result;

	/* DOMAIN\USERNAME -or- USERNAME -- ignore DOMAIN */
	user = strstr(split_fields[0], "\\");
	if (user == NULL)
		user = split_fields[0];
	else
		user++;
	candidate = mem_alloc(FORMAT_TAG_LEN + strlen(split_fields[3]) + 1 +
	                      strlen(split_fields[4]) + 1 +
	                      strlen(split_fields[5]) + 1 + strlen(user) + 1);
	sprintf(candidate, "%s%s$%s$%s$%s", FORMAT_TAG, split_fields[3],
	        split_fields[4], split_fields[5], user);
	result = split_fields[1];
	if (chap_valid_long(candidate))
		result = str_alloc_copy(candidate);
	MEM_FREE(candidate);
	return result;
}
/*
 * Build a short-form $MSCHAPv2$ hash from login fields 3 (challenge)
 * and 4 (response).  Returns the constructed hash if it validates,
 * otherwise the raw hash field.
 */
static char *chap_prepare_short(char *split_fields[10])
{
	char *candidate, *result;

	candidate = mem_alloc(FORMAT_TAG_LEN + strlen(split_fields[3]) + 1 +
	                      strlen(split_fields[4]) + 1 + 1 + 1);
	sprintf(candidate, "%s%s$%s$$", FORMAT_TAG, split_fields[3],
	        split_fields[4]);
	result = split_fields[1];
	if (chap_valid_short(candidate))
		result = str_alloc_copy(candidate);
	MEM_FREE(candidate);
	return result;
}
/*
 * Normalize login fields into the canonical short $MSCHAPv2$ form.
 * Accepts an already-tagged hash (possibly with trailing trash fields),
 * or assembles one from the separate challenge/response fields; any
 * long form produced is converted with chap_long_to_short().
 */
static char *chap_prepare(char *split_fields[10], struct fmt_main *pFmt)
{
	char *ret;

	if (!strncmp(split_fields[1], FORMAT_TAG, FORMAT_TAG_LEN)) {
		// check for a short format that has any extra trash fields, and if so remove them.
		char *cp1, *cp2, *cp3;
		cp1 = split_fields[1];
		cp1 += FORMAT_TAG_LEN;
		cp2 = strchr(cp1, '$');
		ret = NULL;
		if (cp2 && cp2-cp1 == CHAP_CHALLENGE_LENGTH/4) {
			++cp2;
			cp3 = strchr(cp2, '$');
			/* NOTE(review): this condition is true even for a clean
			   "...$$" tail (strlen(cp3)==2 makes cp3[2]=='\0' != '$'),
			   so the copy/trim also runs on already-canonical input;
			   harmless, but '&&' may have been intended — confirm. */
			if (cp3 && cp3-cp2 == CIPHERTEXT_LENGTH && (strlen(cp3) > 2 || cp3[2] != '$')) {
				ret = str_alloc_copy(split_fields[1]);
				/* terminate right after the response: "...$response$$" */
				ret[(cp3-split_fields[1]) + 1] = '$';
				ret[(cp3-split_fields[1]) + 2] = 0;
				//printf("Here is the cut item: %s\n", ret);
			}
		}
	}
	else if (split_fields[0] && split_fields[3] && split_fields[4] &&
	         split_fields[5] &&
	         strlen(split_fields[3]) == CHAP_CHALLENGE_LENGTH/2 &&
	         strlen(split_fields[4]) == CIPHERTEXT_LENGTH &&
	         strlen(split_fields[5]) == CHAP_CHALLENGE_LENGTH/2)
		ret = chap_prepare_long(split_fields);
	else if (split_fields[0] && split_fields[3] && split_fields[4] &&
	         strlen(split_fields[3]) == CHAP_CHALLENGE_LENGTH/4 &&
	         strlen(split_fields[4]) == CIPHERTEXT_LENGTH)
		ret = chap_prepare_short(split_fields);
	else
		ret = NULL;
	/* collapse any long form down to the canonical short form */
	if (ret && chap_valid_long(ret))
		ret = chap_long_to_short(ret);
	else if (chap_valid_long(split_fields[1]))
		ret = chap_long_to_short(split_fields[1]);
	return ret ? ret : split_fields[1];
}
/*
 * Canonicalize a valid ciphertext: lower-case the three hex fields
 * (leaving the trailing username, if any, untouched) and collapse a
 * long form into the short canonical form.
 */
static char *chap_split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[CHAP_TOTAL_LENGTH + 1];
	char *p;
	int dollars;

	memset(out, 0, CHAP_TOTAL_LENGTH + 1);
	memcpy(out, ciphertext, strlen(ciphertext));
	/* stop lower-casing after the third '$' (start of the username) */
	dollars = 0;
	for (p = out + FORMAT_TAG_LEN;
	     p < out + CHAP_TOTAL_LENGTH + 1 && dollars < 3; p++) {
		if (*p >= 'A' && *p <= 'Z')
			*p |= 0x20;
		else if (*p == '$')
			dollars++;
	}
	if (chap_valid_long(out))
		return chap_long_to_short(out);
	return out;
}
/*
 * Return the 8-byte salt for a $NETNTLM$ hash.  Plain NTLMv1 hashes
 * (16-hex-digit challenge, '$' at offset 25) use the server challenge
 * directly; Extended Session Security hashes carry a 16-byte
 * server+client challenge whose MD5 provides the effective 8-byte salt.
 */
static void *ntlm_get_salt(char *ciphertext)
{
	static uchar *binary_salt;
	int i;

	if (!binary_salt)
		binary_salt = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD);

	if (ciphertext[25] == '$') {
		// Server challenge
		ciphertext += FORMAT_TAGN_LEN;
		for (i = 0; i < SALT_SIZE; ++i)
			binary_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) +
				atoi16[ARCH_INDEX(ciphertext[i*2+1])];
	} else {
		uchar es_salt[2*SALT_SIZE], k1[2*SALT_SIZE];
		MD5_CTX ctx;

		ciphertext += FORMAT_TAGN_LEN;
		// Extended Session Security,
		// Concatenate Server & Client challenges
		for (i = 0;i < 2 * SALT_SIZE; ++i)
			es_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) +
				atoi16[ARCH_INDEX(ciphertext[i*2+1])];

		// MD5 the concatenated challenges, result is our key
		MD5_Init(&ctx);
		MD5_Update(&ctx, es_salt, 16);
		MD5_Final((void*)k1, &ctx);
		memcpy(binary_salt, k1, SALT_SIZE); // but only 8 bytes of it
	}
	return (void*)binary_salt;
}
/*
 * Validate a $NETNTLM$ hash.  Total length 74 (plain: 16-hex challenge)
 * or 90 (ESS: 32-hex challenge); both hex fields are checked, then the
 * third 8-byte response block is verified by brute-forcing the 2 bytes
 * of NT hash it encodes (same technique and valid_i/valid_j cache as
 * chap_valid()).
 */
static int ntlm_valid(char *ciphertext, struct fmt_main *self)
{
	char *pos;

	if (strncmp(ciphertext, FORMAT_TAGN, FORMAT_TAGN_LEN)!=0) return 0;

	if ((strlen(ciphertext) != 74) && (strlen(ciphertext) != 90)) return 0;
	if ((ciphertext[25] != '$') && (ciphertext[41] != '$')) return 0;

	/* scan the challenge hex digits up to the separator */
	for (pos = &ciphertext[FORMAT_TAGN_LEN]; atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++);
	if (*pos != '$') return 0;

	/* scan the response hex digits to end of string */
	for (pos++; atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++);
	if (!*pos && ((pos - ciphertext - 26 == CIPHERTEXT_LENGTH) ||
	              (pos - ciphertext - 42 == CIPHERTEXT_LENGTH))) {
		uchar key[7] = {0, 0, 0, 0, 0, 0, 0};
		DES_key_schedule ks;
		DES_cblock b3cmp;
		uchar binary[8];
		DES_cblock *challenge = ntlm_get_salt(ciphertext);
		int i, j;

		/* third response block: skip the first two blocks (32 hex) */
		ciphertext = strrchr(ciphertext, '$') + 1 + 2 * 8 * 2;
		for (i = 0; i < 8; i++) {
			binary[i] = atoi16[ARCH_INDEX(ciphertext[i * 2])] << 4;
			binary[i] |= atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])];
		}

		/* cached byte pair first, then the exhaustive 2^16 search */
		key[0] = valid_i; key[1] = valid_j;
		setup_des_key(key, &ks);
		DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
		if (!memcmp(binary, &b3cmp, 8))
			return 1;

		for (i = 0; i < 0x100; i++)
		for (j = 0; j < 0x100; j++) {
			key[0] = i; key[1] = j;
			setup_des_key(key, &ks);
			DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
			if (!memcmp(binary, &b3cmp, 8)) {
				valid_i = i;
				valid_j = j;
				return 1;
			}
		}
#ifdef DEBUG
		if (!bench_running)
			fprintf(stderr, "Rejected NetNTLM hash with invalid "
			        "3rd block\n");
#endif
	}
	return 0;
}
/*
 * Assemble a $NETNTLM$ hash from separate login fields (3 = "LM"
 * field / client challenge, 4 = NTLM response, 5 = server challenge),
 * rejecting inputs that are already tagged, malformed, NTLMv2, or an
 * anonymous login.  ESS hashes embed the client challenge before the
 * server challenge.  Returns the built hash if it validates, otherwise
 * the raw hash field.
 */
static char *ntlm_prepare(char *split_fields[10], struct fmt_main *self)
{
	char *cp;
	char clientChal[17];

	if (!strncmp(split_fields[1], FORMAT_TAGN, FORMAT_TAGN_LEN))
		return split_fields[1];
	if (!split_fields[3]||!split_fields[4]||!split_fields[5])
		return split_fields[1];
	if (strlen(split_fields[4]) != CIPHERTEXT_LENGTH)
		return split_fields[1];

	// this string suggests we have an improperly formatted NTLMv2
	if (!strncmp(&split_fields[4][32], "0101000000000000", 16))
		return split_fields[1];

	// Ignore anonymous login (Username "", Password "")
	if (split_fields[0] && strlen(split_fields[0]) == 0 &&
	    !strncasecmp(split_fields[3], "edb7398877d716be", 16) &&
	    !strncasecmp(split_fields[4], "42aeb71fbb6dc18499016b08"
	                 "b178ba65430ad39ae2498629", 48))
		return split_fields[1];

	// Handle ESS (8 byte client challenge in "LM" field padded with zeros)
	if (strlen(split_fields[3]) == 48 &&
	    !strncmp(&split_fields[3][16], "00000000000000000000000000000000",
	             32))
	{
		memcpy(clientChal, split_fields[3],16);
		clientChal[16] = 0;
	}
	else
		clientChal[0] = 0;  /* plain NTLMv1: no client challenge */
	cp = mem_alloc(FORMAT_TAGN_LEN+strlen(split_fields[5])+strlen(clientChal)+1+
	               strlen(split_fields[4])+1);
	sprintf(cp, "%s%s%s$%s", FORMAT_TAGN, split_fields[5], clientChal,
	        split_fields[4]);

	if (ntlm_valid(cp,self)) {
		char *cp2 = str_alloc_copy(cp);
		MEM_FREE(cp);
		return cp2;
	}
	MEM_FREE(cp);
	return split_fields[1];
}
/*
 * Canonicalize a $NETNTLM$ ciphertext: copy it into a static buffer and
 * lower-case everything after the tag.
 */
static char *ntlm_split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[NTLM_TOTAL_LENGTH + 1];

	memset(out, 0, sizeof(out));
	strcpy(out, ciphertext);
	strlwr(out + FORMAT_TAGN_LEN); /* Exclude: $NETNTLM$ */
	return out;
}
static void set_salt(void *salt)
{
challenge = salt;
}
// ISO-8859-1 to UCS-2, directly into vector key buffer
/*
 * SIMD build: packs two input characters per 32-bit word straight into
 * the interleaved MD4 buffer, appends the 0x80 padding byte, zeroes the
 * rest of the lane, and stores the bit length (len << 4, i.e. len*16
 * bits) in word 14 as MD4 requires.  Non-SIMD build: widens bytes to
 * UTF-16 in saved_key and records the byte length in saved_len.
 */
static void set_key_ansi(char *_key, int index)
{
#ifdef SIMD_COEF_32
	const uchar *key = (uchar*)_key;
	unsigned int *keybuf_word = (unsigned int*)&saved_key[GETPOS_W32(0, index)];
	unsigned int len, temp2;

	len = 0;
	while((temp2 = *key++)) {
		unsigned int temp;
		if ((temp = *key++) && len < PLAINTEXT_LENGTH - 1)
		{
			/* two more characters: pack both into this word */
			temp2 |= (temp << 16);
			*keybuf_word = temp2;
		}
		else
		{
			/* odd tail (or truncation): char + 0x80 padding */
			temp2 |= (0x80 << 16);
			*keybuf_word = temp2;
			len++;
			goto key_cleaning;
		}
		len += 2;
		keybuf_word += SIMD_COEF_32;
	}
	/* even length: padding byte goes in its own word */
	*keybuf_word = 0x80;

key_cleaning:
	/* zero the remainder of this lane up to the old key's end */
	keybuf_word += SIMD_COEF_32;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	/* MD4 length field (word 14): length in bits = chars * 16 */
	((unsigned int*)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) +
		(unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4;
#else
#if ARCH_LITTLE_ENDIAN
	UTF8 *s = (UTF8*)_key;
	UTF16 *d = saved_key[index];

	while (*s)
		*d++ = *s++;
	*d = 0;
	saved_len[index] = (int)((char*)d - (char*)saved_key[index]);
#else
	/* big-endian: store each char byte then skip one, producing the
	   byte layout the BE crypt path expects — presumably swapped
	   later; confirm against the non-SIMD crypt code (not shown) */
	UTF8 *s = (UTF8*)_key;
	UTF8 *d = (UTF8*)saved_key[index];

	while (*s) {
		*d++ = *s++;
		++d;
	}
	*d = 0;
	saved_len[index] = (int)((char*)d - (char*)saved_key[index]);
#endif
#endif
	keys_prepared = 0;
}
// Legacy codepage to UCS-2, directly into vector key buffer
/*
 * Same packing scheme as set_key_ansi(), but each input byte is first
 * mapped through the CP_to_Unicode[] table for the active legacy
 * codepage.  Non-SIMD build converts via enc_to_utf16(); a negative
 * return (invalid input) falls back to the length of the partial
 * conversion.
 */
static void set_key_CP(char *_key, int index)
{
#ifdef SIMD_COEF_32
	const uchar *key = (uchar*)_key;
	unsigned int *keybuf_word = (unsigned int*)&saved_key[GETPOS_W32(0, index)];
	unsigned int len, temp2;

	len = 0;
	while((temp2 = *key++)) {
		unsigned int temp;
		temp2 = CP_to_Unicode[temp2];
		if ((temp = *key++) && len < PLAINTEXT_LENGTH - 1)
		{
			/* two more characters: map and pack both */
			temp = CP_to_Unicode[temp];
			temp2 |= (temp << 16);
			*keybuf_word = temp2;
		} else {
			/* odd tail (or truncation): char + 0x80 padding */
			temp2 |= (0x80 << 16);
			*keybuf_word = temp2;
			len++;
			goto key_cleaning_enc;
		}
		len += 2;
		keybuf_word += SIMD_COEF_32;
	}
	*keybuf_word = 0x80;

key_cleaning_enc:
	/* zero the remainder of this lane up to the old key's end */
	keybuf_word += SIMD_COEF_32;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	/* MD4 length field (word 14): length in bits = chars * 16 */
	((unsigned int*)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) +
		(unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4;
#else
	saved_len[index] = enc_to_utf16(saved_key[index],
	                                PLAINTEXT_LENGTH + 1,
	                                (uchar*)_key,
	                                strlen(_key)) << 1;
	if (saved_len[index] < 0)
		saved_len[index] = strlen16(saved_key[index]);
#endif
	keys_prepared = 0;
}
// UTF-8 to UCS-2, directly into vector key buffer
/*
 * SIMD build: decodes UTF-8 two code units at a time (chl = low half,
 * chh = high half of each packed 32-bit word) straight into the
 * interleaved MD4 buffer, with surrogate-pair handling when
 * NT_FULL_UNICODE is enabled.  Truncated/invalid sequences bail out and
 * keep whatever was packed so far.
 *
 * Fix: in the second decode switch (filling chh), `case 3:` accumulated
 * into chl — a copy-paste from the first switch — corrupting both halves
 * of the word for 4-byte UTF-8 sequences under NT_FULL_UNICODE.  It now
 * accumulates into chh like the adjacent case 2/case 1 arms.  Also
 * dropped a stray empty statement (double semicolon).
 */
static void set_key_utf8(char *_key, int index)
{
#ifdef SIMD_COEF_32
	const UTF8 *source = (UTF8*)_key;
	unsigned int *keybuf_word = (unsigned int*)&saved_key[GETPOS_W32(0, index)];
	UTF32 chl, chh = 0x80;
	unsigned int len = 0;

	while (*source) {
		chl = *source;
		if (chl >= 0xC0) {
			/* multi-byte sequence: accumulate continuation bytes
			   (cases deliberately fall through) */
			unsigned int extraBytesToRead;
			extraBytesToRead = opt_trailingBytesUTF8[chl & 0x3f];
			switch (extraBytesToRead) {
#if NT_FULL_UNICODE
			case 3:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
#endif
			case 2:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
			case 1:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
			case 0:
				break;
			default:
				goto bailout;
			}
			chl -= offsetsFromUTF8[extraBytesToRead];
		}
		source++;
		len++;
#if NT_FULL_UNICODE
		if (chl > UNI_MAX_BMP) {
			/* needs a surrogate pair; if there is no room for two
			   code units, pad and stop */
			if (len == PLAINTEXT_LENGTH) {
				chh = 0x80;
				*keybuf_word = (chh << 16) | chl;
				keybuf_word += SIMD_COEF_32;
				break;
			}
#define halfBase 0x0010000UL
#define halfShift 10
#define halfMask 0x3FFUL
#define UNI_SUR_HIGH_START (UTF32)0xD800
#define UNI_SUR_LOW_START (UTF32)0xDC00
			chl -= halfBase;
			chh = (UTF16)((chl & halfMask) + UNI_SUR_LOW_START);
			chl = (UTF16)((chl >> halfShift) + UNI_SUR_HIGH_START);
			len++;
		} else
#endif
		if (*source && len < PLAINTEXT_LENGTH) {
			/* decode the second code unit of this word into chh */
			chh = *source;
			if (chh >= 0xC0) {
				unsigned int extraBytesToRead =
					opt_trailingBytesUTF8[chh & 0x3f];
				switch (extraBytesToRead) {
#if NT_FULL_UNICODE
				case 3:
					++source;
					if (*source) {
						chh <<= 6;   /* was chl: corrupted 4-byte sequences */
						chh += *source;
					} else
						goto bailout;
#endif
				case 2:
					++source;
					if (*source) {
						chh <<= 6;
						chh += *source;
					} else
						goto bailout;
				case 1:
					++source;
					if (*source) {
						chh <<= 6;
						chh += *source;
					} else
						goto bailout;
				case 0:
					break;
				default:
					goto bailout;
				}
				chh -= offsetsFromUTF8[extraBytesToRead];
			}
			source++;
			len++;
		} else {
			/* odd length (or truncation): pad the high half */
			chh = 0x80;
			*keybuf_word = (chh << 16) | chl;
			keybuf_word += SIMD_COEF_32;
			break;
		}
		*keybuf_word = (chh << 16) | chl;
		keybuf_word += SIMD_COEF_32;
	}
	/* even length (or empty key): padding byte in its own word */
	if (chh != 0x80 || len == 0) {
		*keybuf_word = 0x80;
		keybuf_word += SIMD_COEF_32;
	}

bailout:
	/* zero the remainder of this lane up to the old key's end */
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	/* MD4 length field (word 14): length in bits = code units * 16 */
	((unsigned int*)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) +
		(unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4;
#else
	saved_len[index] = utf8_to_utf16(saved_key[index],
	                                 PLAINTEXT_LENGTH + 1,
	                                 (uchar*)_key,
	                                 strlen(_key)) << 1;
	if (saved_len[index] < 0)
		saved_len[index] = strlen16(saved_key[index]);
#endif
	keys_prepared = 0;
}
/*
 * One-time format initialization: scale key counts for OpenMP (scalar
 * build only), select the set_key variant for the target encoding, and
 * allocate the shared key/hash/bitmap buffers.
 */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP) && !defined(SIMD_COEF_32)
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	my = self;
	if (options.target_enc == UTF_8) {
		/* UTF-8 input may need up to 3 bytes per UTF-16 code unit */
		self->methods.set_key = set_key_utf8;
		self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH);
	} else {
		if (options.target_enc != ASCII &&
		    options.target_enc != ISO_8859_1)
			self->methods.set_key = set_key_CP;
	}
	if (!saved_key) {
#if SIMD_COEF_32
		saved_key = mem_calloc_align(self->params.max_keys_per_crypt,
		                             sizeof(*saved_key) * 64, MEM_ALIGN_SIMD);
		nthash = mem_calloc_align(self->params.max_keys_per_crypt,
		                          sizeof(*nthash) * 16, MEM_ALIGN_SIMD);
#else
		saved_key = mem_calloc(self->params.max_keys_per_crypt,
		                       sizeof(*saved_key));
		nthash = mem_calloc(self->params.max_keys_per_crypt,
		                    sizeof(*nthash) * 16);
		saved_len = mem_calloc(self->params.max_keys_per_crypt,
		                       sizeof(*saved_len));
#endif
		crypt_key = mem_calloc(self->params.max_keys_per_crypt,
		                       sizeof(unsigned short));
	}
	/* 64Kbit bitmap; reset it on re-init */
	if (bitmap == NULL)
		bitmap = mem_calloc_align(1, 0x10000 / 8, MEM_ALIGN_CACHE);
	else
		memset(bitmap, 0, 0x10000 / 8);
	use_bitmap = 0; /* we did not use bitmap yet */
	cmps_per_crypt = 2; /* try bitmap */
}
static void done(void)
{
	/* Release everything allocated in init().
	   NOTE(review): init() re-allocates only when !saved_key, so
	   MEM_FREE is expected to NULL the pointer — confirm in memory.h. */
	MEM_FREE(bitmap);
	MEM_FREE(crypt_key);
	MEM_FREE(nthash);
#ifndef SIMD_COEF_32
	MEM_FREE(saved_len);
#endif
	MEM_FREE(saved_key);
}
// Get the key back from the key buffer, from UCS-2
static char *get_key(int index)
{
#ifdef SIMD_COEF_32
	/* Keys are stored interleaved across SIMD lanes: consecutive
	   32-bit words of one candidate sit SIMD_COEF_32 words apart.
	   Walk the lane, extracting two UTF-16 units per 32-bit word,
	   until the 0x80 terminator (NT padding byte) is found. */
	unsigned int *keybuf_word = (unsigned int*)&saved_key[GETPOS_W32(0, index)];
	static UTF16 key[PLAINTEXT_LENGTH + 1];
	unsigned int md4_size=0;
	unsigned int i=0;
	for (; md4_size < PLAINTEXT_LENGTH; i += SIMD_COEF_32, md4_size++)
	{
#if ARCH_LITTLE_ENDIAN==1
		key[md4_size] = keybuf_word[i];
		key[md4_size+1] = keybuf_word[i] >> 16;
		/* terminator in the low half-word: 0x80 followed by 0 */
		if (key[md4_size] == 0x80 && key[md4_size+1] == 0) {
			key[md4_size] = 0;
			break;
		}
		++md4_size;
		/* terminator in the high half-word (next word, or buffer end) */
		if (key[md4_size] == 0x80 &&
		    ((keybuf_word[i+SIMD_COEF_32]&0xFFFF) == 0 ||
		     md4_size == PLAINTEXT_LENGTH))
		{
			key[md4_size] = 0;
			break;
		}
#else
		/* big-endian: stored words are byte-swapped, undo it first */
		unsigned int INWORD = JOHNSWAP(keybuf_word[i]);
		key[md4_size] = INWORD >> 16;
		key[md4_size+1] = INWORD;
		if (key[md4_size] == 0x8000 && key[md4_size+1] == 0) {
			key[md4_size] = 0;
			break;
		}
		++md4_size;
		if (key[md4_size] == 0x8000 && (md4_size == PLAINTEXT_LENGTH ||
		    (keybuf_word[i+SIMD_COEF_32]&0xFFFF0000) == 0))
		{
			key[md4_size] = 0;
			break;
		}
#endif
	}
	return (char*)utf16_to_enc(key);
#else
	/* scalar build: keys are kept as plain UTF-16 strings */
	return (char*)utf16_to_enc(saved_key[index]);
#endif
}
static void *get_binary(char *ciphertext)
{
	/* Decode the hex response into binary. Layout of the returned
	   buffer: bytes 0-1 hold the two meaningful bytes of the third
	   7-byte DES key (recovered by brute force below); bytes 2-25 are
	   the 24-byte response itself. */
	static uchar *binary;
	static int warned = 0, loaded = 0;
	DES_cblock *challenge = my->methods.salt(ciphertext);
	int i, j;
	if (!binary) binary = mem_alloc_tiny(FULL_BINARY_SIZE, BINARY_ALIGN);
	/* Warn once when many hashes are loaded: the 2^16 brute force
	   below makes loading slow compared to the -naive format. */
	if (john_main_process)
	if (!warned && !ldr_in_pot && !bench_running && ++loaded > 100) {
		warned = 1;
		fprintf(stderr, "%s: Note: slow loading. For short runs, try "
		        "--format=%s-naive\ninstead. That version loads "
		        "faster but runs slower.\n", my->params.label,
		        my->params.label);
	}
	/* Skip tag and challenge to reach the hex response. */
	if (chap_valid_short(ciphertext))
		ciphertext += FORMAT_TAG_LEN + CHAP_CHALLENGE_LENGTH / 4 + 1;
	else if (chap_valid_long(ciphertext))
		ciphertext += FORMAT_TAG_LEN + CHAP_CHALLENGE_LENGTH / 2 + 1;
	else /* ntlmv1 */
		ciphertext = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < FULL_BINARY_SIZE - 2; i++) {
		binary[2 + i] = atoi16[ARCH_INDEX(ciphertext[i * 2])] << 4;
		binary[2 + i] |= atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])];
	}
	{
		/* Recover the two variable bytes of the third DES key (the
		   NT hash is padded to 21 bytes, so key 3 is 2 hash bytes +
		   5 zeros): try the cached valid_i/valid_j first (computed
		   in valid()), else all 2^16 values, until one encrypts the
		   challenge to the last 8 response bytes. */
		uchar key[7] = {0, 0, 0, 0, 0, 0, 0};
		DES_key_schedule ks;
		DES_cblock b3cmp;
		key[0] = valid_i; key[1] = valid_j;
		setup_des_key(key, &ks);
		DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
		if (!memcmp(&binary[2 + 8 * 2], &b3cmp, 8)) {
			binary[0] = valid_i; binary[1] = valid_j;
			goto out;
		}
		for (i = 0; i < 0x100; i++)
		for (j = 0; j < 0x100; j++) {
			key[0] = i; key[1] = j;
			setup_des_key(key, &ks);
			DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
			if (!memcmp(&binary[2 + 8 * 2], &b3cmp, 8)) {
				binary[0] = i; binary[1] = j;
				goto out;
			}
		}
		fprintf(stderr, "Bug: %s hash with invalid 3rd block, should "
		        "have been rejected in valid()\n", my->params.label);
		binary[0] = binary[1] = 0x55;
	}
out:
	return binary;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	/* Lazily compute the NT (MD4) hashes: only when new keys were set
	   since the last crypt. Only 16 bits of each MD4 result are cached
	   in crypt_key[]; the expensive DES response is computed later, on
	   demand, in cmp_one()/cmp_exact(). */
	if (!keys_prepared) {
		int i = 0;
		if (use_bitmap) {
#if MAX_KEYS_PER_CRYPT >= 200
//#warning Notice: Using memset
			memset(bitmap, 0, 0x10000 / 8);
#else
//#warning Notice: Not using memset
			/* few keys: clearing only the words set last round is
			   cheaper than an 8 KB memset */
#ifdef SIMD_COEF_32
			for (i = 0; i < NBKEYS * BLOCK_LOOPS; i++)
#else
			for (i = 0; i < count; i++)
#endif
			{
				unsigned int value = crypt_key[i];
				bitmap[value >> 5] = 0;
			}
#endif
		}
		/* pay for bitmap maintenance only if cmp_all() ran at least
		   twice during the previous crypt */
		use_bitmap = cmps_per_crypt >= 2;
		cmps_per_crypt = 0;
#ifdef SIMD_COEF_32
#if (BLOCK_LOOPS > 1)
#if defined(_OPENMP) && defined(SSE_OMP)
#pragma omp parallel for
#endif
		for (i = 0; i < BLOCK_LOOPS; i++)
			SIMDmd4body(&saved_key[i * NBKEYS * 64], (unsigned int*)&nthash[i * NBKEYS * 16], NULL, SSEi_MIXED_IN);
#else
		SIMDmd4body(saved_key, (unsigned int*)nthash, NULL, SSEi_MIXED_IN);
#endif
		if (use_bitmap)
			for (i = 0; i < NBKEYS * BLOCK_LOOPS; i++) {
				unsigned int value;
				/* top 16 bits of MD4 output word 3 */
				value = *(uint32_t*)
					&nthash[GETOUTPOS_W32(3, i)] >> 16;
				crypt_key[i] = value;
				bitmap[value >> 5] |= 1U << (value & 0x1f);
			}
		else
			for (i = 0; i < NBKEYS * BLOCK_LOOPS; i++) {
				crypt_key[i] = *(uint32_t*)
					&nthash[GETOUTPOS_W32(3, i)] >> 16;
			}
#else
#if defined(_OPENMP) || (MAX_KEYS_PER_CRYPT > 1)
#ifdef _OPENMP
#pragma omp parallel for
#endif
		for (i = 0; i < count; i++)
#endif
		{
			MD4_CTX ctx;
			MD4_Init( &ctx );
			MD4_Update(&ctx, saved_key[i], saved_len[i]);
			MD4_Final((uchar*)&nthash[i * 16], &ctx);
			/* cache MD4 output bytes 14-15 for fast comparison */
			crypt_key[i] = ((unsigned short*)&nthash[i * 16])[7];
			if (use_bitmap) {
				unsigned int value = crypt_key[i];
				bitmap[value >> 5] |= 1U << (value & 0x1f);
			}
		}
#endif
		keys_prepared = 1;
	}
	return count;
}
static int cmp_one(void *binary, int index)
{
	/* First compare the cached 16 MD4 bits against the brute-forced
	   bytes stored in binary[0..1]. On a match, verify by computing
	   response block 1: DES-encrypt the challenge with the first
	   bytes of the NT hash and compare 8 response bytes. */
#if ARCH_LITTLE_ENDIAN==1
	if (crypt_key[index] == *(unsigned short*)binary)
#else
	if ( JOHNSWAP(crypt_key[index])>>16 == *(unsigned short*)binary)
#endif
	{
		DES_key_schedule ks;
		DES_cblock computed_binary;
		unsigned int key[2];
#ifdef SIMD_COEF_32
		int i;
		/* gather the first 8 NT-hash bytes from the interleaved
		   SIMD output layout */
		for (i = 0; i < 2; i++)
			key[i] =
#if ARCH_LITTLE_ENDIAN==1
			*(uint32_t*) &nthash[GETOUTPOS_W32(i, index)];
#else
			JOHNSWAP (*(uint32_t*) &nthash[GETOUTPOS_W32(i, index)]);
#endif
#else
		memcpy(key, &nthash[index * 16], 8);
#endif
		setup_des_key((unsigned char*)key, &ks);
		DES_ecb_encrypt((DES_cblock*)challenge, &computed_binary,
		                &ks, DES_ENCRYPT);
		/* skip binary[0..1] (key-3 bytes); compare response block 1 */
		return !memcmp(((char*)binary) + 2, computed_binary, 8);
	}
	return 0;
}
static int cmp_all(void *binary, int count)
{
	/* Fast scan: does any candidate's cached 16 MD4 bits match the
	   target? Optionally pre-filtered with the 64K bitmap. */
#if ARCH_LITTLE_ENDIAN==1
	unsigned int value = *(unsigned short*)binary;
#else
	unsigned int value = JOHNSWAP(*(unsigned short*)binary)>>16;
#endif
	int index;
	cmps_per_crypt++;
	if (use_bitmap && !(bitmap[value >> 5] & (1U << (value & 0x1f))))
		goto out;
	/* NOTE(review): the 2-way unrolled loop below reads
	   crypt_key[index + 1], which assumes the key count is even —
	   presumably guaranteed by MIN/MAX_KEYS_PER_CRYPT; confirm. */
#ifdef SIMD_COEF_32
	/* Let's give the optimizer a hint! */
	for (index = 0; index < NBKEYS * BLOCK_LOOPS; index += 2)
#else
	for (index = 0; index < count; index += 2)
#endif
	{
		unsigned int a = crypt_key[index];
		unsigned int b = crypt_key[index + 1];
#if 0
		if (((a | b) & value) != value)
			continue;
#endif
		if (a == value || b == value)
			goto thorough;
	}
	goto out;
thorough:
	/* a pair matched: re-scan candidates and fully verify via cmp_one */
#ifdef SIMD_COEF_32
	for (index = 0; index < NBKEYS * BLOCK_LOOPS; index++)
#else
	for (; index < count; index++)
#endif
	{
		if (crypt_key[index] == value && cmp_one(binary, index))
			return 1;
	}
out:
	return 0;
}
static int cmp_exact(char *source, int index)
{
	/* Full verification: rebuild the complete 24-byte NTLMv1/MSCHAPv2
	   response from this candidate's NT hash and compare it with the
	   hex response in the normalized ciphertext. */
	DES_key_schedule ks;
	uchar binary[24];
	union {
		unsigned char key[24];
		unsigned int Key32[6];
	}k;
	char *cp;
	int i;
#ifdef SIMD_COEF_32
	/* de-interleave the 16-byte NT hash for this candidate */
	for (i = 0; i < 4; i++)
		k.Key32[i] =
#if ARCH_LITTLE_ENDIAN==1
		*(uint32_t*)&nthash[GETOUTPOS_W32(i, index)];
#else
		JOHNSWAP(*(uint32_t*)&nthash[GETOUTPOS_W32(i, index)]);
#endif
#else
	memcpy(k.key, &nthash[index * 16], 16);
#endif
	/* Hash is NULL padded to 21-bytes */
	memset(&k.key[16], 0, 5);
	/* Split into three 7-byte segments for use as DES keys
	   Use each key to DES encrypt challenge
	   Concatenate output to for 24-byte NTLM response */
	setup_des_key(k.key, &ks);
	DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)binary,
	                &ks, DES_ENCRYPT);
	setup_des_key(&k.key[7], &ks);
	DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)&binary[8],
	                &ks, DES_ENCRYPT);
	setup_des_key(&k.key[14], &ks);
	DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)&binary[16],
	                &ks, DES_ENCRYPT);
	// With the normalized source we simply need to skip the
	// $MSCHAPv2$hhhhhhhhhhhhhhhh$ string to get 'real' binary data.
	// $NETNTLM$c75c20bff9baa71f4765f360625700b0$
	cp = &source[11];
	cp = strchr(cp, '$');
	++cp;
	/* compare the hex ciphertext byte-by-byte, bail on first diff */
	for (i = 0; i < 24; ++i) {
		unsigned char c = (atoi16[ARCH_INDEX(*cp)] << 4) +
		                  (atoi16[ARCH_INDEX(*(cp+1))] );
		if (c != binary[i])
			return 0;
		cp += 2;
	}
	return 1;
}
/* Distribute salts (challenges) across hash-table buckets. */
static int salt_hash(void *salt) { return *(uint32_t*)salt & (SALT_HASH_SIZE - 1); }
/* binary_hash_* operate on the 16 bits stored in binary[0..1];
   byte-swapped on big-endian so they match crypt_key[]. */
#if ARCH_LITTLE_ENDIAN==1
static int binary_hash_0(void *binary) { return *(unsigned short*)binary & PH_MASK_0; }
static int binary_hash_1(void *binary) { return *(unsigned short*)binary & PH_MASK_1; }
static int binary_hash_2(void *binary) { return *(unsigned short*)binary & PH_MASK_2; }
static int binary_hash_3(void *binary) { return *(unsigned short*)binary & PH_MASK_3; }
#else
static int binary_hash_0(void *binary) { return (JOHNSWAP(*(unsigned short*)binary)>>16) & PH_MASK_0; }
static int binary_hash_1(void *binary) { return (JOHNSWAP(*(unsigned short*)binary)>>16) & PH_MASK_1; }
static int binary_hash_2(void *binary) { return (JOHNSWAP(*(unsigned short*)binary)>>16) & PH_MASK_2; }
static int binary_hash_3(void *binary) { return (JOHNSWAP(*(unsigned short*)binary)>>16) & PH_MASK_3; }
#endif
/* get_hash_* must hash the same 16-bit domain as binary_hash_*. */
static int get_hash_0(int index) { return crypt_key[index] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index] & PH_MASK_3; }
/* Format descriptor for $MSCHAPv2$ hashes (NT-hash based attack). */
struct fmt_main fmt_MSCHAPv2_new = {
	{
		CHAP_FORMAT_LABEL,
		CHAP_FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		/* OpenMP is advertised for scalar builds, or for SIMD builds
		   only when SSE_OMP is enabled */
#if !defined(SIMD_COEF_32) || (defined(SIMD_COEF_32) && defined(SSE_OMP))
		FMT_OMP |
#endif
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_UNICODE | FMT_UTF8,
		{ NULL },
		{ FORMAT_TAG },
		chap_tests
	}, {
		init,
		done,
		fmt_default_reset,
		chap_prepare,
		chap_valid,
		chap_split,
		get_binary,
		chap_get_salt,
		{ NULL },
		fmt_default_source,
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			NULL,
			NULL,
			NULL
		},
		salt_hash,
		NULL,
		set_salt,
		set_key_ansi,	/* init() may override for UTF-8 / codepages */
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			NULL,
			NULL,
			NULL
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
/* Format descriptor for $NETNTLM$ hashes; shares init/crypt/cmp code
   with the MSCHAPv2 descriptor, differing only in prepare/valid/split
   and salt handling. */
struct fmt_main fmt_NETNTLM_new = {
	{
		NTLM_FORMAT_LABEL,
		NTLM_FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		/* NOTE(review): this guard tests SIMD_PARA_MD4 where the
		   MSCHAPv2 descriptor tests SIMD_COEF_32 — confirm the
		   asymmetry is intentional. */
#if !defined(SIMD_COEF_32) || (defined(SIMD_PARA_MD4) && defined(SSE_OMP))
		FMT_OMP |
#endif
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_UNICODE | FMT_UTF8,
		{ NULL },
		{ FORMAT_TAGN },
		ntlm_tests
	}, {
		init,
		done,
		fmt_default_reset,
		ntlm_prepare,
		ntlm_valid,
		ntlm_split,
		get_binary,
		ntlm_get_salt,
		{ NULL },
		fmt_default_source,
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			NULL,
			NULL,
			NULL
		},
		salt_hash,
		NULL,
		set_salt,
		set_key_ansi,	/* init() may override for UTF-8 / codepages */
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			NULL,
			NULL,
			NULL
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
gameoflife.c | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdarg.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#ifdef _OPENMP
#include <omp.h> /* omp_get_thread_num() used in evolve() */
#endif
//OPTIONAL: comment this out for console outputf
//#define CONSOLE_OUTPUT
#define calcIndex(width, x, y) ((y) * (width) + (x))
#define ALIVE 1
#define DEAD 0
void myexit(const char *s, ...)
{
	/* Print a printf-style formatted message, terminate the line,
	   and abort the program (no cleanup is performed). */
	va_list ap;
	va_start(ap, s);
	vprintf(s, ap);
	va_end(ap);
	putchar('\n');
	abort();
}
char vtk_header[2048];
void create_vtk_header(char *header, int width, int height, int timestep)
{
	/* Compose the legacy-VTK structured-points header into `header`
	   (caller provides a buffer large enough, e.g. vtk_header[2048]).
	   One format call produces exactly the same bytes as the previous
	   strcat-based assembly. */
	sprintf(header,
	        "# vtk DataFile Version 3.0\n"
	        "Gameoflife timestep %d \n"
	        "BINARY\n"
	        "DATASET STRUCTURED_POINTS\n"
	        "DIMENSIONS %d %d 1\n"
	        "SPACING 1.0 1.0 1.0\n"
	        "ORIGIN 0 0 0\n"
	        "POINT_DATA %d\n"
	        "SCALARS data char 1\n"
	        "LOOKUP_TABLE default\n",
	        timestep, width, height, width * height);
}
void write_vtk_data(FILE *f, char *data, int length)
{
	/* Write `length` raw bytes to the already-open stream `f`; abort
	   via myexit() on a short write.
	   Fix: fwrite returns size_t, so compare in size_t to avoid a
	   signed/unsigned mismatch (a negative `length` used to convert
	   to a huge unsigned count inside fwrite). */
	if (length < 0 ||
	    fwrite(data, sizeof(char), (size_t)length, f) != (size_t)length)
	{
		myexit("Could not write vtk-Data");
	}
}
void write_field(char *currentfield, int width, int height, int timestep)
{
	/* Dump the current field: either to the console (ANSI escapes)
	   or as one ./gol/gol-NNNNN.vtk file per timestep. */
#ifdef CONSOLE_OUTPUT
	printf("\033[H");
	for (int y = 0; y < height; y++)
	{
		for (int x = 0; x < width; x++)
			printf(ALIVE == currentfield[calcIndex(width, x, y)] ? "\033[07m \033[m" : " ");
		printf("\033[E");
	}
	fflush(stdout);
	printf("\ntimestep=%d", timestep);
	usleep(80000);
#else
	if (timestep == 0)
	{
		/* first timestep: create the output directory (no-op if it
		   already exists) and build the shared vtk header once */
		mkdir("./gol/", 0777);
		create_vtk_header(vtk_header, width, height, timestep);
	}
	printf("writing timestep %d\n", timestep);
	FILE *fp; // The current file handle.
	char filename[1024];
	snprintf(filename, sizeof filename, "./gol/gol-%05d.vtk", timestep);
	fp = fopen(filename, "w");
	if (fp == NULL)
	{
		/* Bug fix: fopen's result was previously used unchecked,
		   crashing in fwrite when the file could not be created. */
		myexit("Could not open %s for writing", filename);
	}
	write_vtk_data(fp, vtk_header, strlen(vtk_header));
	write_vtk_data(fp, currentfield, width * height);
	fclose(fp);
	printf("finished writing timestep %d\n", timestep);
#endif
}
void evolve(char *currentfield, char *newfield, int width, int height, int regions_x, int regions_y)
{
	/* Advance one Game-of-Life generation. The inner
	   (width-2) x (height-2) domain is split into regions_x * regions_y
	   rectangles, one per OpenMP thread; the one-cell boundary ring is
	   not written here (the caller applies periodic boundaries).
	   NOTE: (width-2) is assumed divisible by regions_x (and height
	   likewise) — trailing cells are silently skipped otherwise.
	   Fix: omp_get_thread_num() was called without <omp.h>; it is now
	   guarded by _OPENMP with a serial fallback. */
	int region_width = (width - 2) / regions_x;
	int region_height = (height - 2) / regions_y;
	int omp_threads = regions_x * regions_y;
	printf("DEBUG: evolving with %d threads.\n", omp_threads);
#pragma omp parallel num_threads(omp_threads)
	{
#ifdef _OPENMP
		int this_thread = omp_get_thread_num();
#else
		int this_thread = 0; /* serial build: only region 0 runs */
#endif
		/* this thread's rectangle, inclusive bounds, 1-based to skip
		   the boundary ring */
		int my_start_y = (this_thread / regions_x) * region_height + 1;
		int my_end_y = my_start_y + region_height - 1;
		int my_start_x = (this_thread % regions_x) * region_width + 1;
		int my_end_x = my_start_x + region_width - 1;
		printf("DEBUG: region %d: start:(%d,%d) end: (%d,%d).\n", this_thread, my_start_x, my_start_y, my_end_x, my_end_y);
		for (int y = my_start_y; y <= my_end_y; y++)
		{
			for (int x = my_start_x; x <= my_end_x; x++)
			{
				int cell = y * width + x;
				int neighbors = 0;
				/* count live cells in the 3x3 block, excluding self */
				for (int j = y - 1; j <= y + 1; j++)
				{
					for (int i = x - 1; i <= x + 1; i++)
					{
						int idx = j * width + i;
						if (currentfield[idx] == 1 && idx != cell)
							neighbors++;
					}
				}
				/* Conway's rules: 3 -> alive, 2 -> unchanged,
				   anything else -> dead */
				if (neighbors == 3)
					newfield[cell] = 1;
				else if (neighbors == 2)
					newfield[cell] = currentfield[cell];
				else
					newfield[cell] = 0;
			}
		}
	}
}
void filling_random(char *currentfield, int width, int height)
{
	/* Seed roughly 10% of the inner cells as alive; the one-cell
	   boundary ring is left untouched. Uses rand() in row-major
	   order, so results match for a given seed. */
	for (int y = 1; y < height - 1; y++)
	{
		for (int x = 1; x < width - 1; x++)
		{
			const int idx = y * width + x;
			currentfield[idx] = (rand() < RAND_MAX / 10) ? 1 : 0;
		}
	}
}
void filling_runner(char *currentfield, int width, int height)
{
	/* Place a glider ("runner") with its bounding box anchored at
	   (width/4, height/2); same five cells as before. */
	const int bx = width / 4;
	const int by = height / 2;
	const int offs[5][2] = { {0, 1}, {1, 2}, {2, 0}, {2, 1}, {2, 2} };
	for (int k = 0; k < 5; k++)
		currentfield[(by + offs[k][1]) * width + (bx + offs[k][0])] = 1;
}
void filling_rpentomino(char *currentfield, int width, int height)
{
	/* Place an R-pentomino with its bounding box anchored at
	   (width/2, height/2); same five cells as before. */
	const int bx = width / 2;
	const int by = height / 2;
	const int offs[5][2] = { {0, 1}, {1, 0}, {1, 1}, {1, 2}, {2, 2} };
	for (int k = 0; k < 5; k++)
		currentfield[(by + offs[k][1]) * width + (bx + offs[k][0])] = 1;
}
void game(int width, int height, int num_timesteps, int regions_x, int regions_y)
{
	/* Run the simulation: allocate two fields (sizes include the
	   one-cell boundary ring), seed a start pattern, then repeat
	   evolve / periodic-boundary copy / output, swapping buffers. */
	char *currentfield = calloc(width * height, sizeof(char));
	char *newfield = calloc(width * height, sizeof(char));
	if (currentfield == NULL || newfield == NULL)
	{
		/* Bug fix: calloc results were previously used unchecked. */
		myexit("Could not allocate the game fields");
	}
	//filling_random(currentfield, width, height);
	filling_runner(currentfield, width, height);
	//filling_rpentomino(currentfield, width, height);
	int time = 0;
	write_field(currentfield, width, height, time);
	for (time = 1; time <= num_timesteps; time++)
	{
		evolve(currentfield, newfield, width, height, regions_x, regions_y);
		/* apply periodic boundary condition: copy the opposite inner
		   column/row into each boundary column/row */
		int ci;
		int ni;
		for (int y = 0; y < height; y++)
		{
			ni = calcIndex(width, 0, y);
			ci = calcIndex(width, width - 2, y);
			newfield[ni] = newfield[ci];
			ni = calcIndex(width, width - 1, y);
			ci = calcIndex(width, 1, y);
			newfield[ni] = newfield[ci];
		}
		for (int x = 0; x < width; x++)
		{
			ci = calcIndex(width, x, height - 2);
			ni = calcIndex(width, x, 0);
			newfield[ni] = newfield[ci];
			ci = calcIndex(width, x, 1);
			ni = calcIndex(width, x, height - 1);
			newfield[ni] = newfield[ci];
		}
		write_field(newfield, width, height, time);
		/* swap buffers so the new generation becomes current */
		char *temp = currentfield;
		currentfield = newfield;
		newfield = temp;
	}
	free(currentfield);
	free(newfield);
}
int main(int c, char **v)
{
	/* Usage: <prog> width height timesteps regions_x regions_y
	   Fixes: the default-size check previously ran AFTER adding the
	   2 boundary cells, so a 0/garbage argument gave width==2 and the
	   default never triggered; also corrected the error-message
	   grammar and added an explicit return. */
	int width = 0, height = 0, num_timesteps, regions_x, regions_y;
	if (c == 6)
	{
		width = atoi(v[1]);         ///< read width
		height = atoi(v[2]);        ///< read height
		num_timesteps = atoi(v[3]); ///< read timesteps
		regions_x = atoi(v[4]);     ///< read subspaces in x
		regions_y = atoi(v[5]);     ///< read subspaces in y
		if (width <= 0)
		{
			width = 30; ///< default inner width (32 incl. boundary)
		}
		if (height <= 0)
		{
			height = 30; ///< default inner height (32 incl. boundary)
		}
		width += 2;  ///< add 2 boundary cells (low x, high x)
		height += 2; ///< add 2 boundary cells (low y, high y)
		game(width, height, num_timesteps, regions_x, regions_y);
	}
	else
	{
		myexit("Too few arguments");
	}
	return 0;
}
|
GB_binop__rminus_fc32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rminus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_08__rminus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_02__rminus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_04__rminus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_fc32)
// A*D function (colscale): GB (_AxD__rminus_fc32)
// D*A function (rowscale): GB (_DxB__rminus_fc32)
// C+=B function (dense accum): GB (_Cdense_accumB__rminus_fc32)
// C+=b function (dense accum): GB (_Cdense_accumb__rminus_fc32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_fc32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_fc32)
// C=scalar+B GB (_bind1st__rminus_fc32)
// C=scalar+B' GB (_bind1st_tran__rminus_fc32)
// C=A+scalar GB (_bind2nd__rminus_fc32)
// C=A'+scalar GB (_bind2nd_tran__rminus_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// A pattern? 0
// B type: GxB_FC32_t
// B pattern? 0
// BinaryOp: cij = GB_FC32_minus (bij, aij)
// Type and operator macros that specialize the shared templates below.
// Fix: GB_A_IS_PATTERN and GB_B_IS_PATTERN had a stray trailing
// backslash after the 0, splicing the following comment line into the
// macro definition (harmless only because comments are removed after
// line splicing). The backslashes are removed here; the same fix
// belongs in the Generator/ source this file is produced from.
#define GB_ATYPE \
    GxB_FC32_t
#define GB_BTYPE \
    GxB_FC32_t
#define GB_CTYPE \
    GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    GxB_FC32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
    0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    GxB_FC32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
    0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_FC32_minus (y, x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0
// op is second
#define GB_OP_IS_SECOND \
    0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RMINUS || GxB_NO_FC32 || GxB_NO_RMINUS_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rminus_fc32)
(
    GrB_Matrix C,           // input/output: dense, accumulated into
    const GrB_Matrix A,     // input dense matrix
    const GrB_Matrix B,     // input dense matrix
    const int nthreads
)
{
    // C += A+B; the loop body comes from the shared template,
    // specialized by the GB_* macros defined above.
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__rminus_fc32)
(
    GrB_Matrix C,           // output: dense, overwritten with A+B
    const GrB_Matrix A,     // input dense matrix
    const GrB_Matrix B,     // input dense matrix
    const int nthreads
)
{
    // C = A+B; loop body supplied by the shared template, specialized
    // by the GB_* macros defined above.
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rminus_fc32)
(
    GrB_Matrix C,           // input/output: dense matrix
    const GrB_Matrix B,     // input: sparse matrix accumulated into C
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;     // operator disabled at compile time
#else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rminus_fc32)
(
    GrB_Matrix C,               // input/output: dense matrix
    const GB_void *p_bwork,     // scalar b, passed as an untyped pointer
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    {
        // get the scalar b for C += b, of type GxB_FC32_t
        GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;      // unreachable; emitted by the generator
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rminus_fc32)
(
    GrB_Matrix C,               // output
    const GrB_Matrix A,         // input matrix
    const GrB_Matrix D,         // diagonal matrix scaling A's columns
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // Cx aliases C's value array; the template fills it in place
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rminus_fc32)
(
    GrB_Matrix C,               // output
    const GrB_Matrix D,         // diagonal matrix scaling B's rows
    const GrB_Matrix B,         // input matrix
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // Cx aliases C's value array; the template fills it in place
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rminus_fc32)
(
    GrB_Matrix C,                   // output
    const int C_sparsity,
    const GrB_Matrix M,             // optional mask
    const bool Mask_struct,         // use the mask structurally
    const bool Mask_comp,           // complement the mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,       // eWiseUnion supplies alpha/beta fill
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    GxB_FC32_t alpha_scalar ;
    GxB_FC32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // typed copies of the fill values used for entries present in
        // only one of A or B
        alpha_scalar = (*((GxB_FC32_t *) alpha_scalar_in)) ;
        beta_scalar = (*((GxB_FC32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rminus_fc32)
(
    GrB_Matrix C,                   // output: sparse or hypersparse
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,             // optional mask
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // the meta-template dispatches over all mask variants
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rminus_fc32)
(
    GrB_Matrix C,                   // output
    const GrB_Matrix M,             // optional mask
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,             // sparse/hypersparse input
    const GrB_Matrix B,             // bitmap/full input
    const bool flipxy,              // apply the op as z=f(y,x)
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
#else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
#endif
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rminus_fc32)
(
    GrB_Matrix C,                   // output
    const GrB_Matrix M,             // sparse/hyper mask (required here)
    const bool Mask_struct,
    const GrB_Matrix A,             // bitmap/full input
    const GrB_Matrix B,             // bitmap/full input
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // iterate over the mask pattern; template handles the rest
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rminus_fc32)
(
    GrB_Matrix C,                   // output: bitmap
    const int ewise_method,
    const GrB_Matrix M,             // optional mask
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // C is bitmap; the template covers all mask variants
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rminus_fc32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,     // bound scalar x (first operand)
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B, or NULL if B is full
    int64_t bnz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
    GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;    // skip entries absent in B
        GxB_FC32_t bij = GBX (Bx, p, false) ;
        // rminus with x bound first: cij = rminus (x, bij)
        Cx [p] = GB_FC32_minus (bij, x) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rminus_fc32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,     // bound scalar y (second operand)
    const int8_t *restrict Ab,  // bitmap of A, or NULL if A is full
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
    GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;    // skip entries absent in A
        GxB_FC32_t aij = GBX (Ax, p, false) ;
        // rminus with y bound second: cij = rminus (aij, y)
        Cx [p] = GB_FC32_minus (y, aij) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    GxB_FC32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_FC32_minus (aij, x) ; \
}
GrB_Info GB (_bind1st_tran__rminus_fc32)
(
    GrB_Matrix C,               // output: op (x, A')
    const GB_void *x_input,     // bound scalar x
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    GxB_FC32_t
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
    // restore GB_ATYPE for code below (pattern emitted by the generator)
    #undef GB_ATYPE
    #define GB_ATYPE \
    GxB_FC32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    GxB_FC32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_FC32_minus (y, aij) ; \
}
GrB_Info GB (_bind2nd_tran__rminus_fc32)
(
    GrB_Matrix C,               // output: op (A', y)
    const GrB_Matrix A,
    const GB_void *y_input,     // bound scalar y
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
ImageWaveformUtils.h | #ifndef CAPTURE3_IMAGE_WAVEFORM_UTILS_H
#define CAPTURE3_IMAGE_WAVEFORM_UTILS_H
#include <cmath>
#include <vector>
#include <omp.h>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <QtGui/QImage>
#include "../engine/objects/image/ImageSize.h"
#include "../engine/objects/image/ImageChannel.h"
namespace Capture3
{
// Render three grayscale waveform images (one per component of the channel):
// for each output column, the sampled pixel values of the corresponding image
// columns are accumulated at a row proportional to (1 - value), then
// normalized by the per-component maximum and tone-mapped through a sine curve.
// NOTE(review): assumes imageChannel.getData() holds interleaved triples
// (3 doubles per pixel) and that outputX/outputY/outputZ have identical sizes
// and a 4-byte-per-pixel format with no scanline padding
// (bytesPerLine == width*4) — confirm with callers.
static void generateWaveform(
const ImageSize &imageSize,
const ImageChannel &imageChannel,
QImage &outputX,
QImage &outputY,
QImage &outputZ
)
{
// Get output data
const auto outputWidth = (unsigned int) outputX.width();
const auto outputHeight = (unsigned int) outputX.height();
const unsigned int outputArea = outputWidth * outputHeight;
const cv::Size outputSize(outputWidth, outputHeight);
unsigned char *outputDataX = outputX.bits();
unsigned char *outputDataY = outputY.bits();
unsigned char *outputDataZ = outputZ.bits();
// Get image data
const unsigned int imageWidth = imageSize.getWidth();
const unsigned int imageHeight = imageSize.getHeight();
const double *imageData = imageChannel.getData();
// Create height map (working histogram, capped at 4x the output resolution)
const unsigned int mapWidth = std::min(imageWidth, outputWidth * 4);
const unsigned int mapHeight = std::min(imageHeight, outputHeight * 4);
const unsigned int mapMax = mapHeight - 1;
const double mapScaleX = imageWidth / (double) mapWidth;
const double mapScaleY = imageHeight / (double) mapHeight;
const cv::Size mapSize(mapWidth, mapHeight);
cv::Mat map(mapSize, CV_64FC3, cv::Scalar(0));
auto *mapData = (double *) map.data;
// Iterate over pixels and convert pixel value to position.
// Race-free: each thread owns one column x, and every write below
// targets only column x of the map.
#pragma omp parallel for schedule(static)
for (unsigned int x = 0; x < mapWidth; x++) {
for (unsigned int y = 0; y < mapHeight; y++) {
// Calculate position of pixel to sample
const auto imageX = (unsigned int) lround(x * mapScaleX);
const auto imageY = (unsigned int) lround(y * mapScaleY);
// Fetch values from channels (inverted so bright values land near row 0)
const unsigned int index = (imageY * imageWidth + imageX) * 3;
double valueX = 1.0 - imageData[index + 0];
double valueY = 1.0 - imageData[index + 1];
double valueZ = 1.0 - imageData[index + 2];
// clamp to [0, 1] so the bin row stays inside the map
valueX = valueX < 0 ? 0 : valueX > 1 ? 1 : valueX;
valueY = valueY < 0 ? 0 : valueY > 1 ? 1 : valueY;
valueZ = valueZ < 0 ? 0 : valueZ > 1 ? 1 : valueZ;
// Convert color to height (row of the histogram bin)
const auto heightX = (unsigned int) lround(valueX * mapMax);
const auto heightY = (unsigned int) lround(valueY * mapMax);
const auto heightZ = (unsigned int) lround(valueZ * mapMax);
// Store value (accumulate intensity into the bin)
mapData[(heightX * mapWidth + x) * 3 + 0] += valueX;
mapData[(heightY * mapWidth + x) * 3 + 1] += valueY;
mapData[(heightZ * mapWidth + x) * 3 + 2] += valueZ;
}
}
// Scale map to output size
cv::Mat scaled;
cv::resize(map, scaled, outputSize, 0, 0, cv::INTER_AREA);
const double *scaledData = (double *) scaled.data;
// Find max value per component; seeded with a small epsilon so the
// normalization below never divides by zero on an all-black map
double maxX = 0.0001;
double maxY = 0.0001;
double maxZ = 0.0001;
for (unsigned int i = 0; i < outputArea; i++) {
const unsigned int index = i * 3;
maxX = scaledData[index + 0] > maxX ? scaledData[index + 0] : maxX;
maxY = scaledData[index + 1] > maxY ? scaledData[index + 1] : maxY;
maxZ = scaledData[index + 2] > maxZ ? scaledData[index + 2] : maxZ;
}
// Normalize, tone-map and write the RGBA output pixels
// (each iteration touches only pixel i, so the loop parallelizes safely)
#pragma omp parallel for schedule(static)
for (unsigned int i = 0; i < outputArea; i++) {
// Calculate input and output index
const unsigned int indexInput = i * 3;
const unsigned int indexOutput = i * 4;
// Fetch and normalize values
double valueX = scaledData[indexInput + 0] / maxX;
double valueY = scaledData[indexInput + 1] / maxY;
double valueZ = scaledData[indexInput + 2] / maxZ;
// sine ease-out boosts faint traces without clipping the bright end
valueX = std::sin(valueX * M_PI_2);
valueY = std::sin(valueY * M_PI_2);
valueZ = std::sin(valueZ * M_PI_2);
valueX = valueX < 0 ? 0 : valueX > 1 ? 1 : valueX;
valueY = valueY < 0 ? 0 : valueY > 1 ? 1 : valueY;
valueZ = valueZ < 0 ? 0 : valueZ > 1 ? 1 : valueZ;
// Calculate output colors: map [0,1] into the 40..170 gray range
const auto colorX = (unsigned int) lround((valueX * 130.0) + 40.0);
const auto colorY = (unsigned int) lround((valueY * 130.0) + 40.0);
const auto colorZ = (unsigned int) lround((valueZ * 130.0) + 40.0);
// Store them (gray into the color bytes, opaque alpha)
outputDataX[indexOutput + 0] = (unsigned char) colorX;
outputDataX[indexOutput + 1] = (unsigned char) colorX;
outputDataX[indexOutput + 2] = (unsigned char) colorX;
outputDataX[indexOutput + 3] = 255;
outputDataY[indexOutput + 0] = (unsigned char) colorY;
outputDataY[indexOutput + 1] = (unsigned char) colorY;
outputDataY[indexOutput + 2] = (unsigned char) colorY;
outputDataY[indexOutput + 3] = 255;
outputDataZ[indexOutput + 0] = (unsigned char) colorZ;
outputDataZ[indexOutput + 1] = (unsigned char) colorZ;
outputDataZ[indexOutput + 2] = (unsigned char) colorZ;
outputDataZ[indexOutput + 3] = 255;
}
// cv::Mat frees its buffer in the destructor; these explicit releases
// are redundant but harmless
map.release();
scaled.release();
}
}
#endif // CAPTURE3_IMAGE_WAVEFORM_UTILS_H
|
fill_nr_s8.c | /*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
//#include <omp.h>
#include "config.h"
#include "cint.h"
#include "cvhf.h"
#include "nr_direct.h"
#include "optimizer.h"
#define MAX(I,J) ((I) > (J) ? (I) : (J))
int GTOmax_shell_dim(int *ao_loc, int *shls, int ncenter);
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
int *atm, int natm, int *bas, int nbas, double *env);
void int2e_optimizer(CINTOpt **opt, int *atm, int natm, int *bas, int nbas, double *env);
/*
* 8-fold symmetry, k>=l, k>=i>=j,
*/
/*
 * Fill eri with the integral blocks over shells (lsh, ksh, jsh, ish) for a
 * fixed shell pair (ish, jsh), looping ksh <= ish and lsh <= ksh (the loop
 * structure that realizes the 8-fold permutation symmetry).
 * eri must hold di*dj full nao x nao matrices; the integral engine uses the
 * tail of the buffer (past eri + di*dj*nao^2) as scratch.
 * Blocks rejected by fprescreen are explicitly zeroed.
 */
static void fillnr_s8(int (*intor)(), int (*fprescreen)(), double *eri,
int ish, int jsh, CVHFOpt *vhfopt, IntorEnvs *envs)
{
const int *atm = envs->atm;
const int *bas = envs->bas;
const double *env = envs->env;
const int natm = envs->natm;
const int nbas = envs->nbas;
const int *ao_loc = envs->ao_loc;
const int *shls_slice = envs->shls_slice;
const CINTOpt *cintopt = envs->cintopt;
const int nao = ao_loc[nbas];
const size_t nao2 = nao * nao;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
/* engine scratch lives past the di*dj nao^2 output blocks */
double *cache = eri + di * dj * nao2;
int dims[4] = {nao, nao, dj, di};
int ksh, lsh, dk, dl, ij, k, l;
int shls[4];
double *peri;
shls[2] = jsh;
shls[3] = ish;
for (ksh = 0; ksh <= ish; ksh++) {
for (lsh = 0; lsh <= ksh; lsh++) {
shls[0] = lsh;
shls[1] = ksh;
/* top-left corner of the (ksh, lsh) block inside each nao x nao slice */
peri = eri + ao_loc[ksh] * nao + ao_loc[lsh];
if ((*fprescreen)(shls, vhfopt, atm, bas, env)) {
(*intor)(peri, dims, shls, atm, natm, bas, nbas, env,
cintopt, cache);
} else {
/* screened out: zero the (ksh, lsh) block in every (i, j) slice */
for (ij = 0; ij < di*dj; ij++) {
for (k = 0; k < ao_loc[ksh+1]-ao_loc[ksh]; k++) {
for (l = 0; l < ao_loc[lsh+1]-ao_loc[lsh]; l++) {
peri[k*nao+l] = 0;
} }
peri += nao2;
}
}
} }
}
/*
 * Compute the shell-pair super-block via fillnr_s8, then scatter it into the
 * s8-packed array eri: for every AO pair i0 >= j0 inside shells (ish, jsh),
 * row ij0 = i0*(i0+1)/2 + j0 of the packed lower triangle receives all
 * elements with kl0 <= ij0 (k < i0 takes the full l <= k range; k == i0 is
 * truncated at l <= j0).
 * buf holds di*dj full nao x nao slices, one per (i, j) within the pair.
 */
static void store_ij(int (*intor)(), double *eri, double *buf, int ish, int jsh,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
const int nbas = envs->nbas;
const int *ao_loc = envs->ao_loc;
const CINTOpt *cintopt = envs->cintopt;
const int nao = ao_loc[nbas];
const size_t nao2 = nao * nao;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
int i, j, k, l, i0, j0, kl;
size_t ij0;
double *peri, *pbuf;
fillnr_s8(intor, vhfopt->fprescreen, buf, ish, jsh, vhfopt, envs);
for (i0 = ao_loc[ish], i = 0; i < di; i++, i0++) {
for (j0 = ao_loc[jsh], j = 0; j < dj; j++, j0++) {
if (i0 >= j0) {
/* start of packed row ij0 in the s8-packed triangular output */
ij0 = i0*(i0+1)/2 + j0;
peri = eri + ij0*(ij0+1)/2;
/* nao x nao slice of this (i, j) inside the pair block */
pbuf = buf + nao2 * (i*dj+j);
for (kl = 0, k = 0; k < i0; k++) {
for (l = 0; l <= k; l++, kl++) {
peri[kl] = pbuf[k*nao+l];
} }
// k == i0
for (l = 0; l <= j0; l++, kl++) {
peri[kl] = pbuf[k*nao+l];
}
}
} }
}
/*
 * Driver: compute all unique two-electron integrals with 8-fold permutation
 * symmetry and store them in the s8-packed triangular array eri
 * (npair*(npair+1)/2 doubles, npair = nao*(nao+1)/2).
 * Shell pairs are distributed dynamically over OpenMP threads; each thread
 * owns a private scratch buffer.
 */
void GTO2e_cart_or_sph(int (*intor)(), CINTOpt *cintopt, double *eri, int *ao_loc,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        const int nao = ao_loc[nbas];
        IntorEnvs envs = {natm, nbas, atm, bas, env, NULL, ao_loc, NULL,
                cintopt, 1};
        CVHFOpt *vhfopt;
        CVHFnr_optimizer(&vhfopt, intor, cintopt, ao_loc, atm, natm, bas, nbas, env);
        vhfopt->fprescreen = CVHFnr_schwarz_cond;
        int shls_slice[] = {0, nbas};
        const int di = GTOmax_shell_dim(ao_loc, shls_slice, 1);
        const int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
                                                 atm, natm, bas, nbas, env);
/* di, nao and cache_size are referenced inside the region and must be listed
 * explicitly under default(none): const does not make them predetermined
 * shared in C for OpenMP 5.0+ compilers. */
#pragma omp parallel default(none) \
        shared(intor, eri, ao_loc, nbas, envs, vhfopt, di, nao, cache_size)
{
        int i, j, ij;
        /* per-thread scratch: di*di full nao x nao blocks plus engine cache;
         * size_t arithmetic avoids int overflow for large basis sets */
        double *buf = malloc(sizeof(double) *
                             ((size_t)di * di * nao * nao + cache_size));
        if (buf == NULL) {
                fprintf(stderr, "malloc failed in GTO2e_cart_or_sph\n");
                exit(1);
        }
#pragma omp for nowait schedule(dynamic, 2)
        for (ij = 0; ij < nbas*(nbas+1)/2; ij++) {
                /* decode packed pair index into shells i >= j */
                i = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
                j = ij - (i*(i+1)/2);
                store_ij(intor, eri, buf, i, j, vhfopt, &envs);
        }
        free(buf);
}
        CVHFdel_optimizer(&vhfopt);
}
|
inputOmpfor.c | /*************************************************
PI calculation
separated omp for, with different scheduling policies
The result will be slightly different from one run to another
if dynamic/guided scheduling is used
since the different orders of floating point operations
By C.Liao
**************************************************/
#include <stdio.h>
#ifdef _OPENMP
#include "omp.h"
#endif
static long num_steps = 10000000;
double step;
int k_3 = 100;
// int k_4=100;
/* Exercise omp for with different scheduling policies while accumulating a
 * PI estimate in the first loop.  Fixes relative to the original:
 *  - the two count-down loops incremented i while testing i > -1 / i >= 0,
 *    so they never terminated (and overflowed int, which is UB); they now
 *    decrement;
 *  - k_3++ inside worksharing loops was a data race; each loop now carries
 *    reduction(+:k_3);
 *  - omp_get_num_threads() is only called when compiled with OpenMP. */
int
main ()
{
  double x, pi, sum = 0.0;
  int i;
  step = 1.0 / (double) num_steps;
  int chunksize = 100;
  int lower = 10, upper = 100, stride = 3;
#pragma omp parallel private (x)
  {
#ifdef _OPENMP
#pragma omp single
    printf ("Running using %d threads..\n", omp_get_num_threads ());
#endif
#pragma omp for reduction(+:sum) reduction(+:k_3)
    for (i = lower; i < upper; i += stride)
      {
        k_3++;
        x = (i - 0.5) * step;
        sum = sum + 4.0 / (1.0 + x * x);
      }
#pragma omp for schedule(static) reduction(+:k_3)
    for (i = lower; i <= upper; i += stride)
      {
        k_3++;
      }
#pragma omp for schedule(static,chunksize) reduction(+:k_3)
    for (i = lower; i < upper; i += stride)
      {
        k_3++;
      }
    /* fixed: previously i++ with condition i > -1 (infinite loop) */
#pragma omp for schedule(dynamic) reduction(+:k_3)
    for (i = num_steps; i > -1; i--)
      {
        k_3++;
      }
#pragma omp for schedule(dynamic, 5) ordered reduction(+:k_3)
    for (i = lower; i <= upper; i += stride)
      {
        k_3++;
      }
    /* fixed: previously i++ with condition i >= 0 (infinite loop) */
#pragma omp for schedule(guided,5) reduction(+:k_3)
    for (i = num_steps; i >= 0; i--)
      {
        k_3++;
      }
  }
  pi = step * sum;
  printf ("step:%e sum:%f PI=%.20f\n", step, sum, pi);
  return 0;
}
|
Grid.h | #pragma once
#include "macros.h"
#include "GridTypes.h"
#include "ScalarField.h"
#include "Vectors.h"
#include "Constants.h"
namespace pfc {
enum InterpolationType {
Interpolation_CIC, Interpolation_TSC,
Interpolation_SecondOrder, Interpolation_FourthOrder, Interpolation_PCS
};
// Templated field grid: stores the E, B and J components as ScalarFields,
// exposes their staggered physical positions, and samples them with one of
// several interpolation schemes (CIC / TSC / second order / fourth order /
// PCS) selected at runtime through member-function pointers.
template<typename Data, GridTypes gridType_>
class Grid :
// next labels define some properties of grid
public LabelFieldsSpatialStraggered<gridType_>,
public LabelFieldsTimeStraggered<gridType_>
{
public:
static const GridTypes gridType = gridType_;
// main constructor: internal cell count, physical lower corner, cell size
Grid(const Int3 & _numInternalCells,
const FP3 & minCoords, const FP3 & _steps,
const Int3 & globalGridDims);
Grid(const Int3 & _numAllCells,
const Int3 & globalGridDims); // for complex grids only
Grid(const Int3 & _numAllCells, const Int3 & globalGridDims,
Grid<FP, gridType_>* grid); // 'grid' and 'this' will have common memory
// copy constructor, can make shallow copies
Grid(const Grid& grid, bool ifShallowCopy = false);
// Physical positions of the staggered components in cell (x, y, z):
// the cell's base corner plus the per-component shift.
forceinline const FP3 BxPosition(int x, int y, int z) const
{
return baseCoords(x, y, z) + shiftBx;
}
forceinline const FP3 ByPosition(int x, int y, int z) const
{
return baseCoords(x, y, z) + shiftBy;
}
forceinline const FP3 BzPosition(int x, int y, int z) const
{
return baseCoords(x, y, z) + shiftBz;
}
forceinline const FP3 ExPosition(int x, int y, int z) const
{
return baseCoords(x, y, z) + shiftEJx;
}
forceinline const FP3 EyPosition(int x, int y, int z) const
{
return baseCoords(x, y, z) + shiftEJy;
}
forceinline const FP3 EzPosition(int x, int y, int z) const
{
return baseCoords(x, y, z) + shiftEJz;
}
// J shares the staggering of E (same shiftEJ* offsets)
forceinline const FP3 JxPosition(int x, int y, int z) const
{
return baseCoords(x, y, z) + shiftEJx;
}
forceinline const FP3 JyPosition(int x, int y, int z) const
{
return baseCoords(x, y, z) + shiftEJy;
}
forceinline const FP3 JzPosition(int x, int y, int z) const
{
return baseCoords(x, y, z) + shiftEJz;
}
// Interpolate E and B at a physical point with the currently selected scheme
void getFieldsXYZ(FP x, FP y, FP z, FP3 & e, FP3 & b) const
{
FP3 coords(x, y, z);
getFields(coords, e, b);
}
void getFields(const FP3& coords, FP3 & e, FP3 & b) const
{
(this->*interpolationFields)(coords, e, b);
}
virtual FP3 getJ(const FP3& coords) const;
virtual FP3 getE(const FP3& coords) const;
virtual FP3 getB(const FP3& coords) const;
// Scheme-specific E/B interpolation (targets of interpolationFields)
void getFieldsCIC(const FP3& coords, FP3 & e, FP3 & b) const;
void getFieldsTSC(const FP3& coords, FP3 & e, FP3 & b) const;
void getFieldsSecondOrder(const FP3& coords, FP3 & e, FP3 & b) const;
void getFieldsFourthOrder(const FP3& coords, FP3 & e, FP3 & b) const;
void getFieldsPCS(const FP3& coords, FP3 & e, FP3 & b) const;
// Per-component getters dispatching through the scheme-selected pointers
FP getEx(const FP3& coords) const
{
return (this->*interpolationEx)(coords);
}
FP getEy(const FP3& coords) const
{
return (this->*interpolationEy)(coords);
}
FP getEz(const FP3& coords) const
{
return (this->*interpolationEz)(coords);
}
FP getBx(const FP3& coords) const
{
return (this->*interpolationBx)(coords);
}
FP getBy(const FP3& coords) const
{
return (this->*interpolationBy)(coords);
}
FP getBz(const FP3& coords) const
{
return (this->*interpolationBz)(coords);
}
FP getJx(const FP3& coords) const
{
return (this->*interpolationJx)(coords);
}
FP getJy(const FP3& coords) const
{
return (this->*interpolationJy)(coords);
}
FP getJz(const FP3& coords) const
{
return (this->*interpolationJz)(coords);
}
// CIC (cloud-in-cell, linear) per-component sampling
FP getExCIC(const FP3& coords) const {
return getFieldCIC(coords, Ex, shiftEJx);
}
FP getEyCIC(const FP3& coords) const {
return getFieldCIC(coords, Ey, shiftEJy);
}
FP getEzCIC(const FP3& coords) const {
return getFieldCIC(coords, Ez, shiftEJz);
}
FP getBxCIC(const FP3& coords) const {
return getFieldCIC(coords, Bx, shiftBx);
}
FP getByCIC(const FP3& coords) const {
return getFieldCIC(coords, By, shiftBy);
}
FP getBzCIC(const FP3& coords) const {
return getFieldCIC(coords, Bz, shiftBz);
}
FP getJxCIC(const FP3& coords) const {
return getFieldCIC(coords, Jx, shiftEJx);
}
FP getJyCIC(const FP3& coords) const {
return getFieldCIC(coords, Jy, shiftEJy);
}
FP getJzCIC(const FP3& coords) const {
return getFieldCIC(coords, Jz, shiftEJz);
}
// TSC (triangular-shaped cloud) per-component sampling
FP getExTSC(const FP3& coords) const {
return getFieldTSC(coords, Ex, shiftEJx);
}
FP getEyTSC(const FP3& coords) const {
return getFieldTSC(coords, Ey, shiftEJy);
}
FP getEzTSC(const FP3& coords) const {
return getFieldTSC(coords, Ez, shiftEJz);
}
FP getBxTSC(const FP3& coords) const {
return getFieldTSC(coords, Bx, shiftBx);
}
FP getByTSC(const FP3& coords) const {
return getFieldTSC(coords, By, shiftBy);
}
FP getBzTSC(const FP3& coords) const {
return getFieldTSC(coords, Bz, shiftBz);
}
FP getJxTSC(const FP3& coords) const {
return getFieldTSC(coords, Jx, shiftEJx);
}
FP getJyTSC(const FP3& coords) const {
return getFieldTSC(coords, Jy, shiftEJy);
}
FP getJzTSC(const FP3& coords) const {
return getFieldTSC(coords, Jz, shiftEJz);
}
// Second-order per-component sampling
FP getExSecondOrder(const FP3& coords) const {
return getFieldSecondOrder(coords, Ex, shiftEJx);
}
FP getEySecondOrder(const FP3& coords) const {
return getFieldSecondOrder(coords, Ey, shiftEJy);
}
FP getEzSecondOrder(const FP3& coords) const {
return getFieldSecondOrder(coords, Ez, shiftEJz);
}
FP getBxSecondOrder(const FP3& coords) const {
return getFieldSecondOrder(coords, Bx, shiftBx);
}
FP getBySecondOrder(const FP3& coords) const {
return getFieldSecondOrder(coords, By, shiftBy);
}
FP getBzSecondOrder(const FP3& coords) const {
return getFieldSecondOrder(coords, Bz, shiftBz);
}
FP getJxSecondOrder(const FP3& coords) const {
return getFieldSecondOrder(coords, Jx, shiftEJx);
}
FP getJySecondOrder(const FP3& coords) const {
return getFieldSecondOrder(coords, Jy, shiftEJy);
}
FP getJzSecondOrder(const FP3& coords) const {
return getFieldSecondOrder(coords, Jz, shiftEJz);
}
// Fourth-order per-component sampling
FP getExFourthOrder(const FP3& coords) const {
return getFieldFourthOrder(coords, Ex, shiftEJx);
}
FP getEyFourthOrder(const FP3& coords) const {
return getFieldFourthOrder(coords, Ey, shiftEJy);
}
FP getEzFourthOrder(const FP3& coords) const {
return getFieldFourthOrder(coords, Ez, shiftEJz);
}
FP getBxFourthOrder(const FP3& coords) const {
return getFieldFourthOrder(coords, Bx, shiftBx);
}
FP getByFourthOrder(const FP3& coords) const {
return getFieldFourthOrder(coords, By, shiftBy);
}
FP getBzFourthOrder(const FP3& coords) const {
return getFieldFourthOrder(coords, Bz, shiftBz);
}
FP getJxFourthOrder(const FP3& coords) const {
return getFieldFourthOrder(coords, Jx, shiftEJx);
}
FP getJyFourthOrder(const FP3& coords) const {
return getFieldFourthOrder(coords, Jy, shiftEJy);
}
FP getJzFourthOrder(const FP3& coords) const {
return getFieldFourthOrder(coords, Jz, shiftEJz);
}
// PCS per-component sampling
FP getExPCS(const FP3& coords) const {
return getFieldPCS(coords, Ex, shiftEJx);
}
FP getEyPCS(const FP3& coords) const {
return getFieldPCS(coords, Ey, shiftEJy);
}
FP getEzPCS(const FP3& coords) const {
return getFieldPCS(coords, Ez, shiftEJz);
}
FP getBxPCS(const FP3& coords) const {
return getFieldPCS(coords, Bx, shiftBx);
}
FP getByPCS(const FP3& coords) const {
return getFieldPCS(coords, By, shiftBy);
}
FP getBzPCS(const FP3& coords) const {
return getFieldPCS(coords, Bz, shiftBz);
}
FP getJxPCS(const FP3& coords) const {
return getFieldPCS(coords, Jx, shiftEJx);
}
FP getJyPCS(const FP3& coords) const {
return getFieldPCS(coords, Jy, shiftEJy);
}
FP getJzPCS(const FP3& coords) const {
return getFieldPCS(coords, Jz, shiftEJz);
}
/*void dumpE(FP3 * e, const Int3 * minCellIdx, const Int3 * maxCellIdx);
void dumpB(FP3 * b, const Int3 * minCellIdx, const Int3 * maxCellIdx);
void dumpCurrents(FP3 * currents, const Int3 * minCellIdx, const Int3 * maxCellIdx);
void loadE(const FP3 * e, const Int3 * minCellIdx, const Int3 * maxCellIdx);
void loadB(const FP3 * b, const Int3 * minCellIdx, const Int3 * maxCellIdx);
void loadCurrents(const FP3 * currents, const Int3 * minCellIdx, const Int3 * maxCellIdx);*/
/* Make all current density values zero. */
void zeroizeJ();
// Two ghost cells per side in every dimension the grid actually resolves;
// degenerate dimensions (global size 1) get none.
const Int3 getNumExternalLeftCells() const
{
Int3 result(2, 2, 2);
for (int d = 0; d < 3; d++)
if (globalGridDims[d] == 1)
result[d] = 0;
return result;
}
const Int3 getNumExternalRightCells() const
{
return getNumExternalLeftCells();
}
void setInterpolationType(InterpolationType type);
InterpolationType getInterpolationType() const;
const Int3 globalGridDims; // important to initialize it first
const FP3 steps;
const Int3 numInternalCells;
const Int3 numCells;
const Int3 sizeStorage; // sometimes can be larger than numCells
const FP3 origin;
const int dimensionality;
ScalarField<Data> Ex, Ey, Ez, Bx, By, Bz, Jx, Jy, Jz;
private:
// 3d shifts of the field in the cell
const FP3 shiftEJx, shiftEJy, shiftEJz,
shiftBx, shiftBy, shiftBz;
/* Get grid index and normalized internal coords in [0, 0, 0]..(1, 1, 1) for
given physical coords and shift. */
void getGridCoords(const FP3 & coords, const FP3 & shift, Int3 & idx,
FP3 & internalCoords) const
{
idx.x = (int)((coords.x - origin.x - shift.x) / steps.x);
idx.y = (int)((coords.y - origin.y - shift.y) / steps.y);
idx.z = (int)((coords.z - origin.z - shift.z) / steps.z);
internalCoords = (coords - baseCoords(idx.x, idx.y, idx.z) - shift) / steps;
}
// Same as getGridCoords but rounds to the nearest node (+0.5 before
// truncation), so internalCoords may be negative; used by the
// node-centered schemes (TSC, second/fourth order).
void getClosestGridCoords(const FP3 & coords, const FP3 & shift, Int3 & idx,
FP3 & internalCoords) const
{
idx.x = (int)((coords.x - origin.x - shift.x) / steps.x + 0.5);
idx.y = (int)((coords.y - origin.y - shift.y) / steps.y + 0.5);
idx.z = (int)((coords.z - origin.z - shift.z) / steps.z + 0.5);
internalCoords = (coords - baseCoords(idx.x, idx.y, idx.z) - shift) / steps;
}
/* Get base coords of element (i, j, k) so that its real coords are
base coords + corresponding shift. */
forceinline const FP3 baseCoords(int i, int j, int k) const
{
return origin + FP3(i, j, k) * steps;
}
// if coords is inside of the area that grid defines
forceinline bool isInside(const FP3 & coords, const FP3 & shift) const
{
FP3 minCoords = origin + shift * steps;
FP3 maxCoords = minCoords + (numCells - Int3(1, 1, 1)) * steps;
return coords >= minCoords && coords <= maxCoords;
}
// Scheme-specific scalar-field sampling helpers
FP getFieldCIC(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const;
FP getFieldTSC(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const;
FP getFieldSecondOrder(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const;
FP getFieldFourthOrder(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const;
FP getFieldPCS(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const;
// Currently selected scheme and the dispatch pointers it installs
// (set by setInterpolationType)
InterpolationType interpolationType;
void (Grid::*interpolationFields)(const FP3&, FP3&, FP3&) const;
FP (Grid::*interpolationEx)(const FP3&) const;
FP(Grid::*interpolationEy)(const FP3&) const;
FP(Grid::*interpolationEz)(const FP3&) const;
FP(Grid::*interpolationBx)(const FP3&) const;
FP(Grid::*interpolationBy)(const FP3&) const;
FP(Grid::*interpolationBz)(const FP3&) const;
FP(Grid::*interpolationJx)(const FP3&) const;
FP(Grid::*interpolationJy)(const FP3&) const;
FP(Grid::*interpolationJz)(const FP3&) const;
};
typedef Grid<FP, GridTypes::YeeGridType> YeeGrid;
typedef Grid<FP, GridTypes::StraightGridType> SimpleGrid;
typedef Grid<FP, GridTypes::PSTDGridType> PSTDGrid;
typedef Grid<FP, GridTypes::PSATDGridType> PSATDGrid;
typedef Grid<FP, GridTypes::PSATDTimeStraggeredGridType> PSATDTimeStraggeredGrid;
// create deep or shallow copy
// Copy constructor: copies all geometry; field storage is deep-copied unless
// ifShallowCopy is set, in which case the ScalarFields alias grid's memory.
template<typename Data, GridTypes gridType_>
inline Grid<Data, gridType_>::Grid(const Grid<Data, gridType_>& grid, bool ifShallowCopy) :
globalGridDims(grid.globalGridDims),
steps(grid.steps),
numInternalCells(grid.numInternalCells),
numCells(grid.numCells),
sizeStorage(grid.sizeStorage),
shiftEJx(grid.shiftEJx), shiftEJy(grid.shiftEJy), shiftEJz(grid.shiftEJz),
shiftBx(grid.shiftBx), shiftBy(grid.shiftBy), shiftBz(grid.shiftBz),
origin(grid.origin),
dimensionality(grid.dimensionality),
Ex(grid.Ex, ifShallowCopy),
Ey(grid.Ey, ifShallowCopy),
Ez(grid.Ez, ifShallowCopy),
Bx(grid.Bx, ifShallowCopy),
By(grid.By, ifShallowCopy),
Bz(grid.Bz, ifShallowCopy),
Jx(grid.Jx, ifShallowCopy),
Jy(grid.Jy, ifShallowCopy),
Jz(grid.Jz, ifShallowCopy)
{
// adopt the source grid's interpolation scheme (re-installs dispatch pointers)
setInterpolationType(grid.interpolationType);
}
// Yee grid: E and J sit on face-centered positions, B on edge-centered ones
// (standard FDTD staggering); ghost cells are added on each side of every
// non-degenerate dimension, and origin is shifted accordingly.
template <>
inline Grid<FP, GridTypes::YeeGridType>::Grid(const Int3 & _numCells, const FP3 & minCoords,
const FP3 & _steps, const Int3 & _globalGridDims) :
globalGridDims(_globalGridDims),
steps(_steps),
numInternalCells(_numCells),
numCells(numInternalCells + getNumExternalLeftCells() + getNumExternalRightCells()),
sizeStorage(numCells),
Ex(sizeStorage), Ey(sizeStorage), Ez(sizeStorage),
Bx(sizeStorage), By(sizeStorage), Bz(sizeStorage),
Jx(sizeStorage), Jy(sizeStorage), Jz(sizeStorage),
shiftEJx(FP3(0, 0.5, 0.5) * steps),
shiftEJy(FP3(0.5, 0, 0.5) * steps),
shiftEJz(FP3(0.5, 0.5, 0) * steps),
shiftBx(FP3(0.5, 0, 0) * steps),
shiftBy(FP3(0, 0.5, 0) * steps),
shiftBz(FP3(0, 0, 0.5) * steps),
origin(minCoords.x - steps.x * getNumExternalLeftCells().x,
minCoords.y - steps.y * getNumExternalLeftCells().y,
minCoords.z - steps.z * getNumExternalLeftCells().z),
dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1))
{
// fix: setInterpolationType(Interpolation_CIC) was called twice here;
// the duplicate call has been removed
setInterpolationType(Interpolation_CIC);
}
// Straight (collocated) grid: every field component sits at the cell corner
// (all shifts are zero); ghost cells are added as for the Yee grid.
template<>
inline Grid<FP, GridTypes::StraightGridType>::Grid(const Int3 & _numInternalCells,
const FP3 & minCoords, const FP3 & _steps, const Int3 & _globalGridDims) :
globalGridDims(_globalGridDims),
steps(_steps),
numInternalCells(_numInternalCells),
numCells(numInternalCells + getNumExternalLeftCells() + getNumExternalRightCells()),
sizeStorage(numCells),
Ex(sizeStorage), Ey(sizeStorage), Ez(sizeStorage),
Bx(sizeStorage), By(sizeStorage), Bz(sizeStorage),
Jx(sizeStorage), Jy(sizeStorage), Jz(sizeStorage),
shiftEJx(FP3(0, 0, 0) * steps),
shiftEJy(FP3(0, 0, 0) * steps),
shiftEJz(FP3(0, 0, 0) * steps),
shiftBx(FP3(0, 0, 0) * steps),
shiftBy(FP3(0, 0, 0) * steps),
shiftBz(FP3(0, 0, 0) * steps),
origin(minCoords.x - steps.x * getNumExternalLeftCells().x,
minCoords.y - steps.y * getNumExternalLeftCells().y,
minCoords.z - steps.z * getNumExternalLeftCells().z),
dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1))
{
setInterpolationType(Interpolation_CIC);
}
// SPECTRAL GRIDS
// PSTD
// PSTD spectral-space grid over complexFP: no ghost cells, collocated
// components. NOTE(review): 'steps' and 'origin' are not initialized by this
// constructor, and the shift initializers multiply by the default-constructed
// 'steps' — zero shifts are guaranteed only if FP3 default-constructs to
// zero; confirm FP3's default constructor.
template<>
inline Grid<complexFP, GridTypes::PSTDGridType>::Grid(const Int3 & _numInternalCells,
const Int3 & _globalGridDims) :
globalGridDims(_globalGridDims),
numInternalCells(_numInternalCells),
numCells(numInternalCells),
sizeStorage(numCells),
Ex(sizeStorage), Ey(sizeStorage), Ez(sizeStorage),
Bx(sizeStorage), By(sizeStorage), Bz(sizeStorage),
Jx(sizeStorage), Jy(sizeStorage), Jz(sizeStorage),
shiftEJx(FP3(0, 0, 0) * steps),
shiftEJy(FP3(0, 0, 0) * steps),
shiftEJz(FP3(0, 0, 0) * steps),
shiftBx(FP3(0, 0, 0) * steps),
shiftBy(FP3(0, 0, 0) * steps),
shiftBz(FP3(0, 0, 0) * steps),
dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1))
{
setInterpolationType(Interpolation_CIC);
}
// PSTD spectral-space grid that wraps the real-space grid's storage,
// reinterpreted as complexFP (shared memory for in-place transforms);
// no new field memory is allocated.
template<>
inline Grid<complexFP, GridTypes::PSTDGridType>::Grid(const Int3 & _numInternalCells,
const Int3 & _globalGridDims, Grid<FP, GridTypes::PSTDGridType>* grid) :
globalGridDims(_globalGridDims),
numInternalCells(_numInternalCells),
numCells(numInternalCells),
sizeStorage(numCells),
Ex(reinterpret_cast<complexFP*>(grid->Ex.getData()), sizeStorage),
Ey(reinterpret_cast<complexFP*>(grid->Ey.getData()), sizeStorage),
Ez(reinterpret_cast<complexFP*>(grid->Ez.getData()), sizeStorage),
Bx(reinterpret_cast<complexFP*>(grid->Bx.getData()), sizeStorage),
By(reinterpret_cast<complexFP*>(grid->By.getData()), sizeStorage),
Bz(reinterpret_cast<complexFP*>(grid->Bz.getData()), sizeStorage),
Jx(reinterpret_cast<complexFP*>(grid->Jx.getData()), sizeStorage),
Jy(reinterpret_cast<complexFP*>(grid->Jy.getData()), sizeStorage),
Jz(reinterpret_cast<complexFP*>(grid->Jz.getData()), sizeStorage),
shiftEJx(FP3(0, 0, 0) * steps),
shiftEJy(FP3(0, 0, 0) * steps),
shiftEJz(FP3(0, 0, 0) * steps),
shiftBx(FP3(0, 0, 0) * steps),
shiftBy(FP3(0, 0, 0) * steps),
shiftBz(FP3(0, 0, 0) * steps),
dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1))
{
setInterpolationType(Interpolation_CIC);
}
// Real-space counterpart of the PSTD grid: no ghost cells; z storage is
// padded to 2*(nz/2 + 1) reals so in-place real-to-complex transforms fit.
template<>
inline Grid<FP, GridTypes::PSTDGridType>::Grid(const Int3 & _numInternalCells,
const FP3 & minCoords, const FP3 & _steps, const Int3 & _globalGridDims) :
globalGridDims(_globalGridDims),
steps(_steps),
numInternalCells(_numInternalCells),
numCells(numInternalCells),
sizeStorage(Int3(numCells.x, numCells.y, 2 * (numCells.z / 2 + 1))),
Ex(sizeStorage), Ey(sizeStorage), Ez(sizeStorage),
Bx(sizeStorage), By(sizeStorage), Bz(sizeStorage),
Jx(sizeStorage), Jy(sizeStorage), Jz(sizeStorage),
shiftEJx(FP3(0, 0, 0) * steps),
shiftEJy(FP3(0, 0, 0) * steps),
shiftEJz(FP3(0, 0, 0) * steps),
shiftBx(FP3(0, 0, 0) * steps),
shiftBy(FP3(0, 0, 0) * steps),
shiftBz(FP3(0, 0, 0) * steps),
origin(minCoords),
dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1))
{
setInterpolationType(Interpolation_CIC);
}
// PSATD
// PSATD spectral-space grid over complexFP (same layout as the PSTD variant).
// NOTE(review): 'steps' and 'origin' are not initialized here; the shift
// initializers multiply by the default-constructed 'steps' — confirm FP3
// default-constructs to zero.
template<>
inline Grid<complexFP, GridTypes::PSATDGridType>::Grid(const Int3 & _numInternalCells,
const Int3 & _globalGridDims) :
globalGridDims(_globalGridDims),
numInternalCells(_numInternalCells),
numCells(numInternalCells),
sizeStorage(numCells),
Ex(sizeStorage), Ey(sizeStorage), Ez(sizeStorage),
Bx(sizeStorage), By(sizeStorage), Bz(sizeStorage),
Jx(sizeStorage), Jy(sizeStorage), Jz(sizeStorage),
shiftEJx(FP3(0, 0, 0) * steps),
shiftEJy(FP3(0, 0, 0) * steps),
shiftEJz(FP3(0, 0, 0) * steps),
shiftBx(FP3(0, 0, 0) * steps),
shiftBy(FP3(0, 0, 0) * steps),
shiftBz(FP3(0, 0, 0) * steps),
dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1))
{
setInterpolationType(Interpolation_CIC);
}
// PSATD spectral-space grid sharing the real-space grid's storage,
// reinterpreted as complexFP; no new field memory is allocated.
template<>
inline Grid<complexFP, GridTypes::PSATDGridType>::Grid(const Int3 & _numInternalCells,
const Int3 & _globalGridDims, Grid<FP, GridTypes::PSATDGridType>* grid) :
globalGridDims(_globalGridDims),
numInternalCells(_numInternalCells),
numCells(numInternalCells),
sizeStorage(numCells),
Ex(reinterpret_cast<complexFP*>(grid->Ex.getData()), sizeStorage),
Ey(reinterpret_cast<complexFP*>(grid->Ey.getData()), sizeStorage),
Ez(reinterpret_cast<complexFP*>(grid->Ez.getData()), sizeStorage),
Bx(reinterpret_cast<complexFP*>(grid->Bx.getData()), sizeStorage),
By(reinterpret_cast<complexFP*>(grid->By.getData()), sizeStorage),
Bz(reinterpret_cast<complexFP*>(grid->Bz.getData()), sizeStorage),
Jx(reinterpret_cast<complexFP*>(grid->Jx.getData()), sizeStorage),
Jy(reinterpret_cast<complexFP*>(grid->Jy.getData()), sizeStorage),
Jz(reinterpret_cast<complexFP*>(grid->Jz.getData()), sizeStorage),
shiftEJx(FP3(0, 0, 0) * steps),
shiftEJy(FP3(0, 0, 0) * steps),
shiftEJz(FP3(0, 0, 0) * steps),
shiftBx(FP3(0, 0, 0) * steps),
shiftBy(FP3(0, 0, 0) * steps),
shiftBz(FP3(0, 0, 0) * steps),
dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1))
{
setInterpolationType(Interpolation_CIC);
}
// Real-space counterpart of the PSATD grid: no ghost cells; z storage padded
// to 2*(nz/2 + 1) reals for in-place real-to-complex transforms.
template<>
inline Grid<FP, GridTypes::PSATDGridType>::Grid(const Int3 & _numInternalCells,
const FP3 & minCoords, const FP3 & _steps, const Int3 & _globalGridDims) :
globalGridDims(_globalGridDims),
steps(_steps),
numInternalCells(_numInternalCells),
numCells(numInternalCells),
sizeStorage(Int3(numCells.x, numCells.y, 2 * (numCells.z / 2 + 1))),
Ex(sizeStorage), Ey(sizeStorage), Ez(sizeStorage),
Bx(sizeStorage), By(sizeStorage), Bz(sizeStorage),
Jx(sizeStorage), Jy(sizeStorage), Jz(sizeStorage),
shiftEJx(FP3(0, 0, 0) * steps),
shiftEJy(FP3(0, 0, 0) * steps),
shiftEJz(FP3(0, 0, 0) * steps),
shiftBx(FP3(0, 0, 0) * steps),
shiftBy(FP3(0, 0, 0) * steps),
shiftBz(FP3(0, 0, 0) * steps),
origin(minCoords),
dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1))
{
setInterpolationType(Interpolation_CIC);
}
// PSATDTimeStraggered
// PSATD time-staggered spectral-space grid over complexFP.
// NOTE(review): 'steps' and 'origin' are not initialized here; the shift
// initializers multiply by the default-constructed 'steps' — confirm FP3
// default-constructs to zero.
template<>
inline Grid<complexFP, GridTypes::PSATDTimeStraggeredGridType>::Grid(const Int3 & _numInternalCells,
const Int3 & _globalGridDims) :
globalGridDims(_globalGridDims),
numInternalCells(_numInternalCells),
numCells(numInternalCells),
sizeStorage(numCells),
Ex(sizeStorage), Ey(sizeStorage), Ez(sizeStorage),
Bx(sizeStorage), By(sizeStorage), Bz(sizeStorage),
Jx(sizeStorage), Jy(sizeStorage), Jz(sizeStorage),
shiftEJx(FP3(0, 0, 0) * steps),
shiftEJy(FP3(0, 0, 0) * steps),
shiftEJz(FP3(0, 0, 0) * steps),
shiftBx(FP3(0, 0, 0) * steps),
shiftBy(FP3(0, 0, 0) * steps),
shiftBz(FP3(0, 0, 0) * steps),
dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1))
{
setInterpolationType(Interpolation_CIC);
}
// PSATDTimeStraggered
// PSATD time-staggered spectral-space grid sharing the real-space grid's
// storage, reinterpreted as complexFP; no new field memory is allocated.
template<>
inline Grid<complexFP, GridTypes::PSATDTimeStraggeredGridType>::Grid(const Int3 & _numInternalCells,
const Int3 & _globalGridDims, Grid<FP, GridTypes::PSATDTimeStraggeredGridType>* grid) :
globalGridDims(_globalGridDims),
numInternalCells(_numInternalCells),
numCells(numInternalCells),
sizeStorage(numCells),
Ex(reinterpret_cast<complexFP*>(grid->Ex.getData()), sizeStorage),
Ey(reinterpret_cast<complexFP*>(grid->Ey.getData()), sizeStorage),
Ez(reinterpret_cast<complexFP*>(grid->Ez.getData()), sizeStorage),
Bx(reinterpret_cast<complexFP*>(grid->Bx.getData()), sizeStorage),
By(reinterpret_cast<complexFP*>(grid->By.getData()), sizeStorage),
Bz(reinterpret_cast<complexFP*>(grid->Bz.getData()), sizeStorage),
Jx(reinterpret_cast<complexFP*>(grid->Jx.getData()), sizeStorage),
Jy(reinterpret_cast<complexFP*>(grid->Jy.getData()), sizeStorage),
Jz(reinterpret_cast<complexFP*>(grid->Jz.getData()), sizeStorage),
shiftEJx(FP3(0, 0, 0) * steps),
shiftEJy(FP3(0, 0, 0) * steps),
shiftEJz(FP3(0, 0, 0) * steps),
shiftBx(FP3(0, 0, 0) * steps),
shiftBy(FP3(0, 0, 0) * steps),
shiftBz(FP3(0, 0, 0) * steps),
dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1))
{
setInterpolationType(Interpolation_CIC);
}
// Real-space counterpart of the PSATD time-staggered grid: no ghost cells;
// z storage padded to 2*(nz/2 + 1) reals for in-place r2c transforms.
template<>
inline Grid<FP, GridTypes::PSATDTimeStraggeredGridType>::Grid(const Int3 & _numInternalCells,
const FP3 & minCoords, const FP3 & _steps, const Int3 & _globalGridDims) :
globalGridDims(_globalGridDims),
steps(_steps),
numInternalCells(_numInternalCells),
numCells(numInternalCells),
sizeStorage(Int3(numCells.x, numCells.y, 2 * (numCells.z / 2 + 1))),
Ex(sizeStorage), Ey(sizeStorage), Ez(sizeStorage),
Bx(sizeStorage), By(sizeStorage), Bz(sizeStorage),
Jx(sizeStorage), Jy(sizeStorage), Jz(sizeStorage),
shiftEJx(FP3(0, 0, 0) * steps),
shiftEJy(FP3(0, 0, 0) * steps),
shiftEJz(FP3(0, 0, 0) * steps),
shiftBx(FP3(0, 0, 0) * steps),
shiftBy(FP3(0, 0, 0) * steps),
shiftBz(FP3(0, 0, 0) * steps),
origin(minCoords),
dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1))
{
setInterpolationType(Interpolation_CIC);
}
// end SPECTRAL GRIDS
template< typename Data, GridTypes gT>
inline FP Grid<Data, gT>::getFieldCIC(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const
{
    // Map the physical position onto the (shifted) grid, then interpolate
    // the scalar field with the cloud-in-cell (linear) kernel.
    Int3 baseCell;
    FP3 weights;
    getGridCoords(coords, shift, baseCell, weights);
    return field.interpolateCIC(baseCell, weights);
}
template< typename Data, GridTypes gT>
inline FP Grid<Data, gT>::getFieldTSC(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const
{
    // Triangular-shaped-cloud interpolation: anchored at the closest grid
    // node rather than the lower-left node used by CIC/PCS.
    Int3 baseCell;
    FP3 weights;
    getClosestGridCoords(coords, shift, baseCell, weights);
    return field.interpolateTSC(baseCell, weights);
}
template< typename Data, GridTypes gT>
inline FP Grid<Data, gT>::getFieldSecondOrder(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const
{
    // Second-order interpolation around the closest grid node.
    Int3 baseCell;
    FP3 weights;
    getClosestGridCoords(coords, shift, baseCell, weights);
    return field.interpolateSecondOrder(baseCell, weights);
}
template< typename Data, GridTypes gT>
inline FP Grid<Data, gT>::getFieldFourthOrder(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const
{
    // Fourth-order interpolation around the closest grid node.
    Int3 baseCell;
    FP3 weights;
    getClosestGridCoords(coords, shift, baseCell, weights);
    return field.interpolateFourthOrder(baseCell, weights);
}
template< typename Data, GridTypes gT>
inline FP Grid<Data, gT>::getFieldPCS(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const
{
    // Piecewise-cubic-spline interpolation; like CIC it is anchored at the
    // lower-left grid node (getGridCoords, not getClosestGridCoords).
    Int3 baseCell;
    FP3 weights;
    getGridCoords(coords, shift, baseCell, weights);
    return field.interpolatePCS(baseCell, weights);
}
template< typename Data, GridTypes gT>
inline void Grid<Data, gT>::getFieldsCIC(const FP3& coords, FP3 & e, FP3 & b) const
{
    // CIC interpolation of E and B at 'coords'. Each staggered component has
    // its own shift, so the base cell and weights are recomputed per component.
    Int3 baseCell;
    FP3 weights;
    getGridCoords(coords, shiftEJx, baseCell, weights);
    e.x = Ex.interpolateCIC(baseCell, weights);
    getGridCoords(coords, shiftEJy, baseCell, weights);
    e.y = Ey.interpolateCIC(baseCell, weights);
    getGridCoords(coords, shiftEJz, baseCell, weights);
    e.z = Ez.interpolateCIC(baseCell, weights);
    getGridCoords(coords, shiftBx, baseCell, weights);
    b.x = Bx.interpolateCIC(baseCell, weights);
    getGridCoords(coords, shiftBy, baseCell, weights);
    b.y = By.interpolateCIC(baseCell, weights);
    getGridCoords(coords, shiftBz, baseCell, weights);
    b.z = Bz.interpolateCIC(baseCell, weights);
}
template< typename Data, GridTypes gT>
inline void Grid<Data, gT>::getFieldsTSC(const FP3& coords, FP3 & e, FP3 & b) const
{
    // TSC interpolation of E and B; every staggered component resolves its
    // own closest base node and interpolation weights.
    Int3 baseCell;
    FP3 weights;
    getClosestGridCoords(coords, shiftEJx, baseCell, weights);
    e.x = Ex.interpolateTSC(baseCell, weights);
    getClosestGridCoords(coords, shiftEJy, baseCell, weights);
    e.y = Ey.interpolateTSC(baseCell, weights);
    getClosestGridCoords(coords, shiftEJz, baseCell, weights);
    e.z = Ez.interpolateTSC(baseCell, weights);
    getClosestGridCoords(coords, shiftBx, baseCell, weights);
    b.x = Bx.interpolateTSC(baseCell, weights);
    getClosestGridCoords(coords, shiftBy, baseCell, weights);
    b.y = By.interpolateTSC(baseCell, weights);
    getClosestGridCoords(coords, shiftBz, baseCell, weights);
    b.z = Bz.interpolateTSC(baseCell, weights);
}
template< typename Data, GridTypes gT>
inline void Grid<Data, gT>::getFieldsSecondOrder(const FP3& coords, FP3 & e, FP3 & b) const
{
    // Second-order interpolation of E and B, component by component.
    Int3 baseCell;
    FP3 weights;
    getClosestGridCoords(coords, shiftEJx, baseCell, weights);
    e.x = Ex.interpolateSecondOrder(baseCell, weights);
    getClosestGridCoords(coords, shiftEJy, baseCell, weights);
    e.y = Ey.interpolateSecondOrder(baseCell, weights);
    getClosestGridCoords(coords, shiftEJz, baseCell, weights);
    e.z = Ez.interpolateSecondOrder(baseCell, weights);
    getClosestGridCoords(coords, shiftBx, baseCell, weights);
    b.x = Bx.interpolateSecondOrder(baseCell, weights);
    getClosestGridCoords(coords, shiftBy, baseCell, weights);
    b.y = By.interpolateSecondOrder(baseCell, weights);
    getClosestGridCoords(coords, shiftBz, baseCell, weights);
    b.z = Bz.interpolateSecondOrder(baseCell, weights);
}
template< typename Data, GridTypes gT>
inline void Grid<Data, gT>::getFieldsFourthOrder(const FP3& coords, FP3 & e, FP3 & b) const
{
    // Fourth-order interpolation of E and B, component by component.
    Int3 baseCell;
    FP3 weights;
    getClosestGridCoords(coords, shiftEJx, baseCell, weights);
    e.x = Ex.interpolateFourthOrder(baseCell, weights);
    getClosestGridCoords(coords, shiftEJy, baseCell, weights);
    e.y = Ey.interpolateFourthOrder(baseCell, weights);
    getClosestGridCoords(coords, shiftEJz, baseCell, weights);
    e.z = Ez.interpolateFourthOrder(baseCell, weights);
    getClosestGridCoords(coords, shiftBx, baseCell, weights);
    b.x = Bx.interpolateFourthOrder(baseCell, weights);
    getClosestGridCoords(coords, shiftBy, baseCell, weights);
    b.y = By.interpolateFourthOrder(baseCell, weights);
    getClosestGridCoords(coords, shiftBz, baseCell, weights);
    b.z = Bz.interpolateFourthOrder(baseCell, weights);
}
template< typename Data, GridTypes gT>
inline void Grid<Data, gT>::getFieldsPCS(const FP3& coords, FP3 & e, FP3 & b) const
{
    // PCS interpolation of E and B; anchored at the lower-left grid node
    // (getGridCoords), like CIC.
    Int3 baseCell;
    FP3 weights;
    getGridCoords(coords, shiftEJx, baseCell, weights);
    e.x = Ex.interpolatePCS(baseCell, weights);
    getGridCoords(coords, shiftEJy, baseCell, weights);
    e.y = Ey.interpolatePCS(baseCell, weights);
    getGridCoords(coords, shiftEJz, baseCell, weights);
    e.z = Ez.interpolatePCS(baseCell, weights);
    getGridCoords(coords, shiftBx, baseCell, weights);
    b.x = Bx.interpolatePCS(baseCell, weights);
    getGridCoords(coords, shiftBy, baseCell, weights);
    b.y = By.interpolatePCS(baseCell, weights);
    getGridCoords(coords, shiftBz, baseCell, weights);
    b.z = Bz.interpolatePCS(baseCell, weights);
}
template< typename Data, GridTypes gT>
inline FP3 Grid<Data, gT>::getJ(const FP3& coords) const
{
    // CIC-interpolated current density at 'coords'; each component uses its
    // own stagger shift.
    // (A disabled out-of-area guard used to return FP3(0, 0, 0) here.)
    Int3 baseCell;
    FP3 weights;
    FP3 result;
    getGridCoords(coords, shiftEJx, baseCell, weights);
    result.x = Jx.interpolateCIC(baseCell, weights);
    getGridCoords(coords, shiftEJy, baseCell, weights);
    result.y = Jy.interpolateCIC(baseCell, weights);
    getGridCoords(coords, shiftEJz, baseCell, weights);
    result.z = Jz.interpolateCIC(baseCell, weights);
    return result;
}
template< typename Data, GridTypes gT>
inline FP3 Grid<Data, gT>::getE(const FP3& coords) const
{
    // CIC-interpolated electric field at 'coords'; each component uses its
    // own stagger shift. (The original comment said "J" — copy-paste typo.)
    // (A disabled out-of-area guard used to return FP3(0, 0, 0) here.)
    Int3 baseCell;
    FP3 weights;
    FP3 result;
    getGridCoords(coords, shiftEJx, baseCell, weights);
    result.x = Ex.interpolateCIC(baseCell, weights);
    getGridCoords(coords, shiftEJy, baseCell, weights);
    result.y = Ey.interpolateCIC(baseCell, weights);
    getGridCoords(coords, shiftEJz, baseCell, weights);
    result.z = Ez.interpolateCIC(baseCell, weights);
    return result;
}
template< typename Data, GridTypes gT>
inline FP3 Grid<Data, gT>::getB(const FP3& coords) const
{
    // CIC-interpolated magnetic field at 'coords'; each component uses its
    // own stagger shift.
    // (A disabled out-of-area guard used to return FP3(0, 0, 0) here.)
    Int3 baseCell;
    FP3 weights;
    FP3 result;
    getGridCoords(coords, shiftBx, baseCell, weights);
    result.x = Bx.interpolateCIC(baseCell, weights);
    getGridCoords(coords, shiftBy, baseCell, weights);
    result.y = By.interpolateCIC(baseCell, weights);
    getGridCoords(coords, shiftBz, baseCell, weights);
    result.z = Bz.interpolateCIC(baseCell, weights);
    return result;
}
// Reset all three current density components to zero, typically before
// accumulating currents for a new time step.
template< typename Data, GridTypes gT>
inline void Grid<Data, gT>::zeroizeJ()
{
Jx.zeroize();
Jy.zeroize();
Jz.zeroize();
}
// Select the active interpolation scheme: record the choice and rebind all
// member-function pointers used by the field/current getters.
// An unrecognized 'type' leaves the previously configured pointers untouched
// (same behavior as before; the switch now has an explicit default so the
// intent is visible and -Wswitch-style diagnostics are satisfied).
template< typename Data, GridTypes gT>
inline void Grid<Data, gT>::setInterpolationType(InterpolationType type)
{
    interpolationType = type;
    switch (interpolationType)
    {
    case Interpolation_CIC:
        interpolationFields = &Grid<Data, gT>::getFieldsCIC;
        interpolationEx = &Grid<Data, gT>::getExCIC;
        interpolationEy = &Grid<Data, gT>::getEyCIC;
        interpolationEz = &Grid<Data, gT>::getEzCIC;
        interpolationBx = &Grid<Data, gT>::getBxCIC;
        interpolationBy = &Grid<Data, gT>::getByCIC;
        interpolationBz = &Grid<Data, gT>::getBzCIC;
        interpolationJx = &Grid<Data, gT>::getJxCIC;
        interpolationJy = &Grid<Data, gT>::getJyCIC;
        interpolationJz = &Grid<Data, gT>::getJzCIC;
        break;
    case Interpolation_TSC:
        interpolationFields = &Grid<Data, gT>::getFieldsTSC;
        interpolationEx = &Grid<Data, gT>::getExTSC;
        interpolationEy = &Grid<Data, gT>::getEyTSC;
        interpolationEz = &Grid<Data, gT>::getEzTSC;
        interpolationBx = &Grid<Data, gT>::getBxTSC;
        interpolationBy = &Grid<Data, gT>::getByTSC;
        interpolationBz = &Grid<Data, gT>::getBzTSC;
        interpolationJx = &Grid<Data, gT>::getJxTSC;
        interpolationJy = &Grid<Data, gT>::getJyTSC;
        interpolationJz = &Grid<Data, gT>::getJzTSC;
        break;
    case Interpolation_PCS:
        interpolationFields = &Grid<Data, gT>::getFieldsPCS;
        interpolationEx = &Grid<Data, gT>::getExPCS;
        interpolationEy = &Grid<Data, gT>::getEyPCS;
        interpolationEz = &Grid<Data, gT>::getEzPCS;
        interpolationBx = &Grid<Data, gT>::getBxPCS;
        interpolationBy = &Grid<Data, gT>::getByPCS;
        interpolationBz = &Grid<Data, gT>::getBzPCS;
        interpolationJx = &Grid<Data, gT>::getJxPCS;
        interpolationJy = &Grid<Data, gT>::getJyPCS;
        interpolationJz = &Grid<Data, gT>::getJzPCS;
        break;
    case Interpolation_SecondOrder:
        interpolationFields = &Grid<Data, gT>::getFieldsSecondOrder;
        interpolationEx = &Grid<Data, gT>::getExSecondOrder;
        interpolationEy = &Grid<Data, gT>::getEySecondOrder;
        interpolationEz = &Grid<Data, gT>::getEzSecondOrder;
        interpolationBx = &Grid<Data, gT>::getBxSecondOrder;
        interpolationBy = &Grid<Data, gT>::getBySecondOrder;
        interpolationBz = &Grid<Data, gT>::getBzSecondOrder;
        interpolationJx = &Grid<Data, gT>::getJxSecondOrder;
        interpolationJy = &Grid<Data, gT>::getJySecondOrder;
        interpolationJz = &Grid<Data, gT>::getJzSecondOrder;
        break;
    case Interpolation_FourthOrder:
        interpolationFields = &Grid<Data, gT>::getFieldsFourthOrder;
        interpolationEx = &Grid<Data, gT>::getExFourthOrder;
        interpolationEy = &Grid<Data, gT>::getEyFourthOrder;
        interpolationEz = &Grid<Data, gT>::getEzFourthOrder;
        interpolationBx = &Grid<Data, gT>::getBxFourthOrder;
        interpolationBy = &Grid<Data, gT>::getByFourthOrder;
        interpolationBz = &Grid<Data, gT>::getBzFourthOrder;
        interpolationJx = &Grid<Data, gT>::getJxFourthOrder;
        interpolationJy = &Grid<Data, gT>::getJyFourthOrder;
        interpolationJz = &Grid<Data, gT>::getJzFourthOrder;
        break;
    default:
        // Unknown interpolation type: keep the current function pointers.
        break;
    }
}
// Return the currently selected interpolation scheme (set by
// setInterpolationType; defaults to Interpolation_CIC in the constructors).
template<typename Data, GridTypes gT>
inline InterpolationType Grid<Data, gT>::getInterpolationType() const
{
return interpolationType;
}
/*template<>
inline void Grid<FP, YeeGridType>::dumpB(FP3 * b, const Int3 * minCellIdx, const Int3 * maxCellIdx)
{
Int3 numCells = *maxCellIdx - *minCellIdx;
#pragma omp parallel for collapse(3)
for (int i = 0; i < numCells.x; ++i)
for (int j = 0; j < numCells.y; ++j)
for (int k = 0; k < numCells.z; ++k)
{
int idx = numCells.y * numCells.z * i + numCells.z * j + k;
Int3 nodeIdx = *minCellIdx + Int3(i, j, k);
b[idx].x = Bx(nodeIdx);
b[idx].y = By(nodeIdx);
b[idx].z = Bz(nodeIdx);
}
}
template<>
inline void Grid<FP, YeeGridType>::dumpE(FP3 * e, const Int3 * minCellIdx, const Int3 * maxCellIdx)
{
Int3 numCells = *maxCellIdx - *minCellIdx;
#pragma omp parallel for collapse(3)
for (int i = 0; i < numCells.x; ++i)
for (int j = 0; j < numCells.y; ++j)
for (int k = 0; k < numCells.z; ++k)
{
int idx = numCells.y * numCells.z * i + numCells.z * j + k;
Int3 nodeIdx = *minCellIdx + Int3(i, j, k);
e[idx].x = Ex(nodeIdx);
e[idx].y = Ey(nodeIdx);
e[idx].z = Ez(nodeIdx);
}
}
template<>
inline void Grid<FP, YeeGridType>::dumpCurrents(FP3 * currents, const Int3 * minCellIdx,
const Int3 * maxCellIdx)
{
Int3 numCells = *maxCellIdx - *minCellIdx;
#pragma omp parallel for collapse(3)
for (int i = 0; i < numCells.x; ++i)
for (int j = 0; j < numCells.y; ++j)
for (int k = 0; k < numCells.z; ++k)
{
int idx = numCells.y * numCells.z * i + numCells.z * j + k;
Int3 nodeIdx = *minCellIdx + Int3(i, j, k);
currents[idx].x = Jx(nodeIdx);
currents[idx].y = Jy(nodeIdx);
currents[idx].z = Jz(nodeIdx);
idx++;
}
}
template<>
inline void Grid<FP, YeeGridType>::loadE(const FP3 * e, const Int3 * minCellIdx, const Int3 * maxCellIdx)
{
Int3 numCells = *maxCellIdx - *minCellIdx;
#pragma omp parallel for collapse(3)
for (int i = 0; i < numCells.x; i++)
for (int j = 0; j < numCells.y; j++)
for (int k = 0; k < numCells.z; k++)
{
int idx = numCells.y * numCells.z * i + numCells.z * j + k;
Int3 nodeIdx = *minCellIdx + Int3(i, j, k);
Ex(nodeIdx) = e[idx].x;
Ey(nodeIdx) = e[idx].y;
Ez(nodeIdx) = e[idx].z;
}
}
template<>
inline void Grid<FP, YeeGridType>::loadB(const FP3 * b, const Int3 * minCellIdx, const Int3 * maxCellIdx)
{
Int3 numCells = *maxCellIdx - *minCellIdx;
#pragma omp parallel for collapse(3)
for (int i = 0; i < numCells.x; ++i)
for (int j = 0; j < numCells.y; ++j)
for (int k = 0; k < numCells.z; ++k)
{
int idx = numCells.y * numCells.z * i + numCells.z * j + k;
Int3 nodeIdx = *minCellIdx + Int3(i, j, k);
Bx(nodeIdx) = b[idx].x;
By(nodeIdx) = b[idx].y;
Bz(nodeIdx) = b[idx].z;
}
}
template<>
inline void Grid<FP, YeeGridType>::loadCurrents(const FP3 * currents, const Int3 * minCellIdx, const Int3 * maxCellIdx)
{
Int3 numCells = *maxCellIdx - *minCellIdx;
#pragma omp parallel for collapse(3)
for (int i = 0; i < numCells.x; i++)
for (int j = 0; j < numCells.y; j++)
for (int k = 0; k < numCells.z; k++)
{
int idx = numCells.y * numCells.z * i + numCells.z * j + k;
Int3 nodeIdx = *minCellIdx + Int3(i, j, k);
Jx(nodeIdx) = currents[idx].x;
Jy(nodeIdx) = currents[idx].y;
Jz(nodeIdx) = currents[idx].z;
}
}*/
} |
DenseMatrix.h | //=================================================================================================
/*!
// \file blaze/math/smp/openmp/DenseMatrix.h
// \brief Header file for the OpenMP-based dense matrix SMP implementation
//
// Copyright (C) 2013 Klaus Iglberger - All Rights Reserved
//
// This file is part of the Blaze library. You can redistribute it and/or modify it under
// the terms of the New (Revised) BSD License. Redistribution and use in source and binary
// forms, with or without modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// 3. Neither the names of the Blaze development group nor the names of its contributors
// may be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
*/
//=================================================================================================
#ifndef _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_
#define _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_
//*************************************************************************************************
// Includes
//*************************************************************************************************
#include <omp.h>
#include <blaze/math/Aliases.h>
#include <blaze/math/AlignmentFlag.h>
#include <blaze/math/constraints/SMPAssignable.h>
#include <blaze/math/expressions/DenseMatrix.h>
#include <blaze/math/expressions/SparseMatrix.h>
#include <blaze/math/Functions.h>
#include <blaze/math/simd/SIMDTrait.h>
#include <blaze/math/smp/ParallelSection.h>
#include <blaze/math/smp/SerialSection.h>
#include <blaze/math/smp/ThreadMapping.h>
#include <blaze/math/StorageOrder.h>
#include <blaze/math/traits/SubmatrixExprTrait.h>
#include <blaze/math/typetraits/AreSIMDCombinable.h>
#include <blaze/math/typetraits/IsDenseMatrix.h>
#include <blaze/math/typetraits/IsSMPAssignable.h>
#include <blaze/math/views/Submatrix.h>
#include <blaze/system/SMP.h>
#include <blaze/util/Assert.h>
#include <blaze/util/EnableIf.h>
#include <blaze/util/logging/FunctionTrace.h>
#include <blaze/util/mpl/And.h>
#include <blaze/util/mpl/Not.h>
#include <blaze/util/mpl/Or.h>
#include <blaze/util/StaticAssert.h>
#include <blaze/util/Types.h>
#include <blaze/util/typetraits/IsSame.h>
namespace blaze {
//=================================================================================================
//
// PLAIN ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP assignment of a dense matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side dense matrix to be assigned.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a dense
// matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
// NOTE(review): this backend must run inside an active OpenMP parallel
// region — the '#pragma omp for' below binds to the enclosing region
// created by the smpAssign() frontend.
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side dense matrix
, bool SO2 > // Storage order of the right-hand side dense matrix
void smpAssign_backend( DenseMatrix<MT1,SO1>& lhs, const DenseMatrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
typedef ElementType_<MT1> ET1;
typedef ElementType_<MT2> ET2;
typedef SubmatrixExprTrait_<MT1,aligned> AlignedTarget;
typedef SubmatrixExprTrait_<MT1,unaligned> UnalignedTarget;
enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<MT1> >::size };
// Vectorized kernels are usable only if both matrix types are SIMD-enabled
// and their element types can be combined in a single SIMD operation.
const bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && AreSIMDCombinable<ET1,ET2>::value );
const bool lhsAligned ( (~lhs).isAligned() );
const bool rhsAligned ( (~rhs).isAligned() );
const int threads( omp_get_num_threads() );
// Partition the matrix into a 2D grid of per-thread submatrix blocks.
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
// Block extents are rounded up (ceiling division), and — when SIMD is
// possible — padded to a multiple of SIMDSIZE so aligned submatrix views
// stay properly aligned.
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t equalShare1( (~rhs).rows() / threadmap.first + addon1 );
const size_t rest1 ( equalShare1 & ( SIMDSIZE - 1UL ) );
const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) );
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t equalShare2( (~rhs).columns() / threadmap.second + addon2 );
const size_t rest2 ( equalShare2 & ( SIMDSIZE - 1UL ) );
const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) );
// Each iteration handles one submatrix block; 'nowait' skips the implicit
// barrier since the frontend's parallel region ends right after this call.
#pragma omp for schedule(dynamic,1) nowait
for( int i=0; i<threads; ++i )
{
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
// Rounding-up can leave trailing blocks that fall outside the matrix.
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
const size_t m( min( rowsPerThread, (~rhs).rows() - row ) );
const size_t n( min( colsPerThread, (~rhs).columns() - column ) );
// Pick aligned/unaligned submatrix views per operand for best SIMD use.
if( simdEnabled && lhsAligned && rhsAligned ) {
AlignedTarget target( submatrix<aligned>( ~lhs, row, column, m, n ) );
assign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
}
else if( simdEnabled && lhsAligned ) {
AlignedTarget target( submatrix<aligned>( ~lhs, row, column, m, n ) );
assign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
else if( simdEnabled && rhsAligned ) {
UnalignedTarget target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
assign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
}
else {
UnalignedTarget target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
assign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP assignment of a sparse matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side sparse matrix to be assigned.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a sparse
// matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
// Backend of the OpenMP-based SMP assignment of a sparse matrix to a dense
// matrix. Must run inside an active parallel region (see the frontend).
template< typename MT1 // Type of the left-hand side dense matrix
        , bool SO1 // Storage order of the left-hand side dense matrix
        , typename MT2 // Type of the right-hand side sparse matrix
        , bool SO2 > // Storage order of the right-hand side sparse matrix
void smpAssign_backend( DenseMatrix<MT1,SO1>& lhs, const SparseMatrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   typedef SubmatrixExprTrait_<MT1,unaligned> UnalignedTarget;

   // 'threads' is an int, matching omp_get_num_threads() and the loop index
   // below (the previous size_t caused a signed/unsigned comparison and was
   // inconsistent with the dense/dense backend).
   const int threads( omp_get_num_threads() );
   const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );

   // Ceiling division: distribute rows/columns as evenly as possible.
   const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
   const size_t rowsPerThread( (~rhs).rows() / threadmap.first + addon1 );

   const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
   const size_t colsPerThread( (~rhs).columns() / threadmap.second + addon2 );

   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0; i<threads; ++i )
   {
      const size_t row ( ( i / threadmap.second ) * rowsPerThread );
      const size_t column( ( i % threadmap.second ) * colsPerThread );

      // Rounding-up can leave trailing blocks outside the matrix.
      if( row >= (~rhs).rows() || column >= (~rhs).columns() )
         continue;

      const size_t m( min( rowsPerThread, (~lhs).rows() - row ) );
      const size_t n( min( colsPerThread, (~lhs).columns() - column ) );

      // Sparse sources cannot use SIMD-aligned views; always go unaligned.
      UnalignedTarget target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
      assign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be assigned.
// \return void
//
// This function implements the default OpenMP-based SMP assignment to a dense matrix. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>
, Or< Not< IsSMPAssignable<MT1> >
, Not< IsSMPAssignable<MT2> > > > >
smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
// Serial fallback: selected (via SFINAE) when at least one operand is not
// SMP-assignable, so the work is forwarded to the non-parallel kernel.
assign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be assigned.
// \return void
//
// This function implements the OpenMP-based SMP assignment to a dense matrix. Due to the
// explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
// Element types themselves must not be SMP-assignable; otherwise the nested
// assignment would try to spawn parallelism of its own.
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> );
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_PARALLEL_SECTION
{
// Fall back to the serial kernel when a serial section is active or the
// right-hand side expression cannot be evaluated in parallel.
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
assign( ~lhs, ~rhs );
}
else {
// Open the parallel region here; the backend distributes the work with
// an '#pragma omp for' bound to this region.
#pragma omp parallel shared( lhs, rhs )
smpAssign_backend( ~lhs, ~rhs );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// ADDITION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP addition assignment of a dense matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side dense matrix to be added.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP addition assignment of a
// dense matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
// Backend of the OpenMP-based SMP addition assignment of a dense matrix to
// a dense matrix. Must run inside an active parallel region.
template< typename MT1 // Type of the left-hand side dense matrix
        , bool SO1 // Storage order of the left-hand side dense matrix
        , typename MT2 // Type of the right-hand side dense matrix
        , bool SO2 > // Storage order of the right-hand side dense matrix
void smpAddAssign_backend( DenseMatrix<MT1,SO1>& lhs, const DenseMatrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   typedef ElementType_<MT1> ET1;
   typedef ElementType_<MT2> ET2;
   typedef SubmatrixExprTrait_<MT1,aligned> AlignedTarget;
   typedef SubmatrixExprTrait_<MT1,unaligned> UnalignedTarget;

   enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<MT1> >::size };

   // Consistent with smpAssign_backend: SIMD kernels are usable whenever the
   // element types are SIMD-combinable (the stricter IsSame_ check needlessly
   // disabled vectorization for combinable-but-distinct element types).
   const bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && AreSIMDCombinable<ET1,ET2>::value );
   const bool lhsAligned ( (~lhs).isAligned() );
   const bool rhsAligned ( (~rhs).isAligned() );

   const int threads( omp_get_num_threads() );
   const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );

   // Ceiling division, padded to SIMDSIZE when SIMD is possible so aligned
   // submatrix views stay aligned.
   const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
   const size_t equalShare1( (~rhs).rows() / threadmap.first + addon1 );
   const size_t rest1 ( equalShare1 & ( SIMDSIZE - 1UL ) );
   const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) );

   const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
   const size_t equalShare2( (~rhs).columns() / threadmap.second + addon2 );
   const size_t rest2 ( equalShare2 & ( SIMDSIZE - 1UL ) );
   const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) );

   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0; i<threads; ++i )
   {
      const size_t row ( ( i / threadmap.second ) * rowsPerThread );
      const size_t column( ( i % threadmap.second ) * colsPerThread );

      // Rounding-up can leave trailing blocks outside the matrix.
      if( row >= (~rhs).rows() || column >= (~rhs).columns() )
         continue;

      const size_t m( min( rowsPerThread, (~rhs).rows() - row ) );
      const size_t n( min( colsPerThread, (~rhs).columns() - column ) );

      // Pick aligned/unaligned submatrix views per operand for best SIMD use.
      if( simdEnabled && lhsAligned && rhsAligned ) {
         AlignedTarget target( submatrix<aligned>( ~lhs, row, column, m, n ) );
         addAssign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
      }
      else if( simdEnabled && lhsAligned ) {
         AlignedTarget target( submatrix<aligned>( ~lhs, row, column, m, n ) );
         addAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
      }
      else if( simdEnabled && rhsAligned ) {
         UnalignedTarget target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
         addAssign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
      }
      else {
         UnalignedTarget target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
         addAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP addition assignment of a sparse matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side sparse matrix to be added.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP addition assignment of a
// sparse matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1    // Type of the left-hand side dense matrix
        , bool SO1        // Storage order of the left-hand side dense matrix
        , typename MT2    // Type of the right-hand side sparse matrix
        , bool SO2 >      // Storage order of the right-hand side sparse matrix
void smpAddAssign_backend( DenseMatrix<MT1,SO1>& lhs, const SparseMatrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Must run inside an active OpenMP parallel section; every thread executes
   // this function and picks up blocks via the 'omp for' below.
   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   typedef SubmatrixExprTrait_<MT1,unaligned>  UnalignedTarget;

   // Use 'int' for the thread count to match the OpenMP loop index type and the
   // dense-matrix backend above (the original 'size_t' caused a signed/unsigned
   // comparison in the loop condition).
   const int threads( omp_get_num_threads() );
   const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );

   // Rows/columns per thread, rounded up so that all elements are covered.
   const size_t addon1( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
   const size_t rowsPerThread( (~rhs).rows() / threadmap.first + addon1 );

   const size_t addon2( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
   const size_t colsPerThread( (~rhs).columns() / threadmap.second + addon2 );

   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0; i<threads; ++i )
   {
      // Map the linear block index onto the 2D thread grid.
      const size_t row   ( ( i / threadmap.second ) * rowsPerThread );
      const size_t column( ( i % threadmap.second ) * colsPerThread );

      // Rounding can push a block completely outside the matrix; skip it.
      if( row >= (~rhs).rows() || column >= (~rhs).columns() )
         continue;

      // Clamp the block extents against the matrix bounds. 'rhs' is used here
      // for consistency with the guard above (the caller asserts that lhs and
      // rhs have identical dimensions).
      const size_t m( min( rowsPerThread, (~rhs).rows()    - row    ) );
      const size_t n( min( colsPerThread, (~rhs).columns() - column ) );

      UnalignedTarget target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
      addAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be added.
// \return void
//
// This function implements the default OpenMP-based SMP addition assignment to a dense matrix.
// Due to the explicit application of the SFINAE principle, this function can only be selected
// by the compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1  // Type of the left-hand side dense matrix
        , bool SO1      // Storage order of the left-hand side dense matrix
        , typename MT2  // Type of the right-hand side matrix
        , bool SO2 >    // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>
                     , Or< Not< IsSMPAssignable<MT1> >
                         , Not< IsSMPAssignable<MT2> > > > >
   smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Sanity checks: the caller guarantees matching dimensions.
   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
   BLAZE_INTERNAL_ASSERT( (~lhs).rows()    == (~rhs).rows()   , "Invalid number of rows"    );

   // At least one operand is not SMP-assignable; fall back to the serial addition.
   addAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be added.
// \return void
//
// This function implements the OpenMP-based SMP addition assignment to a dense matrix. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1  // Type of the left-hand side dense matrix
        , bool SO1      // Storage order of the left-hand side dense matrix
        , typename MT2  // Type of the right-hand side matrix
        , bool SO2 >    // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
   smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).rows()    == (~rhs).rows()   , "Invalid number of rows"    );
   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );

   BLAZE_PARALLEL_SECTION
   {
      // Parallel execution is only possible outside a serial section and when
      // the right-hand side operand supports an SMP-parallel assignment.
      const bool runParallel( !isSerialSectionActive() && (~rhs).canSMPAssign() );

      if( runParallel ) {
         #pragma omp parallel shared( lhs, rhs )
         smpAddAssign_backend( ~lhs, ~rhs );
      }
      else {
         addAssign( ~lhs, ~rhs );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// SUBTRACTION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP subtraction assignment of a dense matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side dense matrix to be subtracted.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP subtraction assignment
// of a dense matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1    // Type of the left-hand side dense matrix
        , bool SO1        // Storage order of the left-hand side dense matrix
        , typename MT2    // Type of the right-hand side dense matrix
        , bool SO2 >      // Storage order of the right-hand side dense matrix
void smpSubAssign_backend( DenseMatrix<MT1,SO1>& lhs, const DenseMatrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Must run inside an active OpenMP parallel section; each thread executes
   // this function and claims blocks via the 'omp for' below.
   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   typedef ElementType_<MT1> ET1;
   typedef ElementType_<MT2> ET2;
   typedef SubmatrixExprTrait_<MT1,aligned> AlignedTarget;
   typedef SubmatrixExprTrait_<MT1,unaligned> UnalignedTarget;

   // Number of elements per SIMD register for the element type of MT1.
   enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<MT1> >::size };

   // Vectorization requires both matrix types to be SIMD-enabled with the same
   // element type; alignment of each operand is a runtime property.
   const bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSame_<ET1,ET2> );
   const bool lhsAligned ( (~lhs).isAligned() );
   const bool rhsAligned ( (~rhs).isAligned() );

   const int threads( omp_get_num_threads() );
   const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );

   // Split rows/columns evenly over the 2D thread grid, rounding up. In SIMD
   // mode the per-thread share is additionally padded to the next multiple of
   // SIMDSIZE so block boundaries stay SIMD-aligned (SIMDSIZE is a power of 2,
   // hence the '& (SIMDSIZE - 1)' remainder trick).
   const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
   const size_t equalShare1( (~rhs).rows() / threadmap.first + addon1 );
   const size_t rest1 ( equalShare1 & ( SIMDSIZE - 1UL ) );
   const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) );

   const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
   const size_t equalShare2( (~rhs).columns() / threadmap.second + addon2 );
   const size_t rest2 ( equalShare2 & ( SIMDSIZE - 1UL ) );
   const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) );

   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0; i<threads; ++i )
   {
      // Map the linear block index onto the 2D thread grid.
      const size_t row ( ( i / threadmap.second ) * rowsPerThread );
      const size_t column( ( i % threadmap.second ) * colsPerThread );

      // Padding can push a block completely outside the matrix; skip it.
      if( row >= (~rhs).rows() || column >= (~rhs).columns() )
         continue;

      // Clamp the block extents to the matrix bounds.
      const size_t m( min( rowsPerThread, (~rhs).rows() - row ) );
      const size_t n( min( colsPerThread, (~rhs).columns() - column ) );

      // Pick the aligned/unaligned submatrix combination that matches the
      // runtime alignment of both operands.
      if( simdEnabled && lhsAligned && rhsAligned ) {
         AlignedTarget target( submatrix<aligned>( ~lhs, row, column, m, n ) );
         subAssign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
      }
      else if( simdEnabled && lhsAligned ) {
         AlignedTarget target( submatrix<aligned>( ~lhs, row, column, m, n ) );
         subAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
      }
      else if( simdEnabled && rhsAligned ) {
         UnalignedTarget target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
         subAssign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
      }
      else {
         UnalignedTarget target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
         subAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP subtraction assignment of a sparse matrix to a dense
// matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side sparse matrix to be subtracted.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP subtraction assignment
// of a sparse matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1    // Type of the left-hand side dense matrix
        , bool SO1        // Storage order of the left-hand side dense matrix
        , typename MT2    // Type of the right-hand side sparse matrix
        , bool SO2 >      // Storage order of the right-hand side sparse matrix
void smpSubAssign_backend( DenseMatrix<MT1,SO1>& lhs, const SparseMatrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Must run inside an active OpenMP parallel section; every thread executes
   // this function and picks up blocks via the 'omp for' below.
   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   typedef SubmatrixExprTrait_<MT1,unaligned>  UnalignedTarget;

   // Use 'int' for the thread count to match the OpenMP loop index type and the
   // dense-matrix backend above (the original 'size_t' caused a signed/unsigned
   // comparison in the loop condition).
   const int threads( omp_get_num_threads() );
   const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );

   // Rows/columns per thread, rounded up so that all elements are covered.
   const size_t addon1( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
   const size_t rowsPerThread( (~rhs).rows() / threadmap.first + addon1 );

   const size_t addon2( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
   const size_t colsPerThread( (~rhs).columns() / threadmap.second + addon2 );

   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0; i<threads; ++i )
   {
      // Map the linear block index onto the 2D thread grid.
      const size_t row   ( ( i / threadmap.second ) * rowsPerThread );
      const size_t column( ( i % threadmap.second ) * colsPerThread );

      // Rounding can push a block completely outside the matrix; skip it.
      if( row >= (~rhs).rows() || column >= (~rhs).columns() )
         continue;

      // Clamp the block extents against the matrix bounds. 'rhs' is used here
      // for consistency with the guard above (the caller asserts that lhs and
      // rhs have identical dimensions).
      const size_t m( min( rowsPerThread, (~rhs).rows()    - row    ) );
      const size_t n( min( colsPerThread, (~rhs).columns() - column ) );

      UnalignedTarget target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
      subAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP subtraction assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be subtracted.
// \return void
//
// This function implements the default OpenMP-based SMP subtraction assignment to a dense matrix.
// Due to the explicit application of the SFINAE principle, this function can only be selected by
// the compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1  // Type of the left-hand side dense matrix
        , bool SO1      // Storage order of the left-hand side dense matrix
        , typename MT2  // Type of the right-hand side matrix
        , bool SO2 >    // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>
                     , Or< Not< IsSMPAssignable<MT1> >
                         , Not< IsSMPAssignable<MT2> > > > >
   smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Sanity checks: the caller guarantees matching dimensions.
   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
   BLAZE_INTERNAL_ASSERT( (~lhs).rows()    == (~rhs).rows()   , "Invalid number of rows"    );

   // At least one operand is not SMP-assignable; fall back to the serial subtraction.
   subAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP subtraction assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be subtracted.
// \return void
//
// This function implements the default OpenMP-based SMP subtraction assignment of a matrix to a
// dense matrix. Due to the explicit application of the SFINAE principle, this function can only
// be selected by the compiler in case both operands are SMP-assignable and the element types of
// both operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1  // Type of the left-hand side dense matrix
        , bool SO1      // Storage order of the left-hand side dense matrix
        , typename MT2  // Type of the right-hand side matrix
        , bool SO2 >    // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
   smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).rows()    == (~rhs).rows()   , "Invalid number of rows"    );
   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );

   BLAZE_PARALLEL_SECTION
   {
      // Parallel execution is only possible outside a serial section and when
      // the right-hand side operand supports an SMP-parallel assignment.
      const bool runParallel( !isSerialSectionActive() && (~rhs).canSMPAssign() );

      if( runParallel ) {
         #pragma omp parallel shared( lhs, rhs )
         smpSubAssign_backend( ~lhs, ~rhs );
      }
      else {
         subAssign( ~lhs, ~rhs );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// MULTIPLICATION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be multiplied.
// \return void
//
// This function implements the default OpenMP-based SMP multiplication assignment to a dense
// matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1  // Type of the left-hand side dense matrix
        , bool SO1      // Storage order of the left-hand side matrix
        , typename MT2  // Type of the right-hand side matrix
        , bool SO2 >    // Storage order of the right-hand side matrix
inline EnableIf_< IsDenseMatrix<MT1> >
   smpMultAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Sanity checks: the caller guarantees matching dimensions.
   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
   BLAZE_INTERNAL_ASSERT( (~lhs).rows()    == (~rhs).rows()   , "Invalid number of rows"    );

   // Multiplication assignment is not parallelized at this level; delegate to
   // the serial implementation.
   multAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// COMPILE TIME CONSTRAINT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
namespace {
BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE );
}
/*! \endcond */
//*************************************************************************************************
} // namespace blaze
#endif
|
stimuli.c | //
// Created by sachetto on 13/10/17.
//
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include "../alg/grid/grid.h"
#include "../config/stim_config.h"
#include "../config_helpers/config_helpers.h"
#include "../utils/utils.h"
// Stores the computed stimulus current for cell i in the per-stimulus
// persistent buffer (allocated by ALLOCATE_STIMS below).
#define SET_STIM_VALUE(i, stim_value) ((real *)(config->persistent_data))[i] = stim_value

// (Re-)allocates the persistent stimulus buffer with one 'real' per active cell.
// For adaptive meshes the active-cell count may change between calls, so the
// buffer is always released and reallocated; for fixed meshes it is allocated
// only once and reused.
// Assumes 'adaptive', 'n_active' and 'config' are in scope — presumably
// provided by the SET_SPATIAL_STIM function signature; verify against
// stim_config.h.
// NOTE(review): the malloc results are not checked; callers assume allocation
// succeeds — TODO confirm the project's out-of-memory policy.
#define ALLOCATE_STIMS() \
if(adaptive) { \
if(config->persistent_data) { \
free(config->persistent_data); \
} \
config->persistent_data = (real *)malloc(n_active * sizeof(real)); \
} \
else { \
if(!config->persistent_data) { \
config->persistent_data = (real *)malloc(n_active * sizeof(real)); \
} \
}
SET_SPATIAL_STIM(set_benchmark_spatial_stim) {

    real stim_current = 0.0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, stim_current, config->config_data, "current");

    ALLOCATE_STIMS();

    // Stimulate the benchmark region: a box bounded in x, and capped in y and z.
    // Loop-local variables are implicitly private, so no private() clause is needed.
    #pragma omp parallel for
    for(uint32_t i = 0; i < n_active; i++) {
        const bool inside = (ac[i]->center.x > 5500.0) && (ac[i]->center.x < 7000.0) &&
                            (ac[i]->center.y < 1500.0) && (ac[i]->center.z < 1500.0);
        SET_STIM_VALUE(i, inside ? stim_current : 0.0);
    }
}
SET_SPATIAL_STIM(stim_if_x_less_than) {

    real stim_current = 0.0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, stim_current, config->config_data, "current");

    real_cpu x_limit = 0.0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, x_limit, config->config_data, "x_limit");

    ALLOCATE_STIMS();

    // Cells strictly left of x_limit receive the stimulus current.
    #pragma omp parallel for
    for(uint32_t i = 0; i < n_active; i++) {
        const bool stimulated = ac[i]->center.x < x_limit;
        SET_STIM_VALUE(i, stimulated ? stim_current : 0.0);
    }
}
SET_SPATIAL_STIM(stim_if_y_less_than) {

    real stim_current = 0.0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, stim_current, config->config_data, "current");

    real_cpu y_limit = 0.0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, y_limit, config->config_data, "y_limit");

    ALLOCATE_STIMS();

    // Cells strictly below y_limit receive the stimulus current.
    #pragma omp parallel for
    for(uint32_t i = 0; i < n_active; i++) {
        const bool stimulated = ac[i]->center.y < y_limit;
        SET_STIM_VALUE(i, stimulated ? stim_current : 0.0);
    }
}
SET_SPATIAL_STIM(stim_if_z_less_than) {

    real stim_current = 0.0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, stim_current, config->config_data, "current");

    real_cpu z_limit = 0.0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, z_limit, config->config_data, "z_limit");

    ALLOCATE_STIMS();

    // Cells strictly below z_limit receive the stimulus current.
    #pragma omp parallel for
    for(uint32_t i = 0; i < n_active; i++) {
        const bool stimulated = ac[i]->center.z < z_limit;
        SET_STIM_VALUE(i, stimulated ? stim_current : 0.0);
    }
}
SET_SPATIAL_STIM(stim_if_y_greater_or_equal_than) {

    real stim_current = 0.0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, stim_current, config->config_data, "current");

    real_cpu y_limit = 0.0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, y_limit, config->config_data, "y_limit");

    ALLOCATE_STIMS();

    // Cells at or above y_limit receive the stimulus current.
    #pragma omp parallel for
    for(uint32_t i = 0; i < n_active; i++) {
        const bool stimulated = ac[i]->center.y >= y_limit;
        SET_STIM_VALUE(i, stimulated ? stim_current : 0.0);
    }
}
SET_SPATIAL_STIM(set_stim_from_file) {
char *stim_file = NULL;
GET_PARAMETER_VALUE_CHAR_OR_REPORT_ERROR(stim_file, config->config_data, "stim_file");
size_t s_size;
bool stim;
real stim_current = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, stim_current, config->config_data, "current");
real stim_value;
FILE *s_file = fopen(stim_file, "r");
if(!s_file) {
fprintf(stderr, "Error opening stim file %s! Exiting!\n", stim_file);
exit(EXIT_FAILURE);
}
fscanf(s_file, "%zu\n", &s_size);
real_cpu **cell_stims = (real_cpu **)malloc(sizeof(real_cpu *) * s_size);
for(int i = 0; i < s_size; i++) {
cell_stims[i] = (real_cpu *)malloc(sizeof(real_cpu) * 3);
if(cell_stims[i] == NULL) {
fprintf(stderr, "Failed to allocate memory for the stim file\n");
exit(0);
}
fscanf(s_file, "%lf %lf %lf\n", &cell_stims[i][0], &cell_stims[i][1], &cell_stims[i][2]);
}
sort_vector(cell_stims, s_size);
fclose(s_file);
uint32_t i;
ALLOCATE_STIMS();
#pragma omp parallel for private(stim, stim_value)
for(i = 0; i < n_active; i++) {
real_cpu center_x = ac[i]->center.x;
real_cpu center_y = ac[i]->center.y;
real_cpu center_z = ac[i]->center.z;
int index = inside_mesh(cell_stims, center_x, center_y, center_z, 0, s_size - 1);
stim = (index != -1);
if(stim) {
stim_value = stim_current;
} else {
stim_value = 0.0;
}
SET_STIM_VALUE(i, stim_value);
}
}
SET_SPATIAL_STIM(stim_if_x_greater_equal_than) {

    real_cpu x_limit = 0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, x_limit, config->config_data, "x_limit");

    real stim_current = 0.0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, stim_current, config->config_data, "current");

    ALLOCATE_STIMS();

    // Cells at or beyond x_limit receive the stimulus current.
    #pragma omp parallel for
    for(uint32_t i = 0; i < n_active; i++) {
        const bool stimulated = ac[i]->center.x >= x_limit;
        SET_STIM_VALUE(i, stimulated ? stim_current : 0.0);
    }
}
SET_SPATIAL_STIM(stim_base_mouse) {

    real_cpu stim_size = 0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, stim_size, config->config_data, "stim_size");

    real stim_current = 0.0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, stim_current, config->config_data, "current");

    ALLOCATE_STIMS();

    // Stimulate a box of half-width stim_size centered at (3000, 2400, 300).
    #pragma omp parallel for
    for(uint32_t i = 0; i < n_active; i++) {
        const bool in_x = (ac[i]->center.x >= 3000.0 - stim_size) && (ac[i]->center.x <= 3000.0 + stim_size);
        const bool in_y = (ac[i]->center.y >= 2400.0 - stim_size) && (ac[i]->center.y <= 2400.0 + stim_size);
        const bool in_z = (ac[i]->center.z >= 300 - stim_size) && (ac[i]->center.z <= 300 + stim_size);
        SET_STIM_VALUE(i, (in_x && in_y && in_z) ? stim_current : 0.0);
    }
}
SET_SPATIAL_STIM(stim_mouse_spiral) {

    real stim_current = 0.0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, stim_current, config->config_data, "current");

    ALLOCATE_STIMS();

    // Stimulate a fixed box region used to initiate a spiral wave.
    // The loop index is uint32_t (not int) to match n_active and the sibling
    // stimulus functions, removing the signed/unsigned comparison present in
    // the original.
    #pragma omp parallel for
    for(uint32_t i = 0; i < n_active; i++) {
        bool stim;
        stim = (ac[i]->center.x >= 3000.0) && (ac[i]->center.x <= 6000.0);
        stim &= (ac[i]->center.y >= 1940.0) && (ac[i]->center.y <= 6100.0);
        stim &= (ac[i]->center.z >= 2230.0) && (ac[i]->center.z <= 5800.0);
        SET_STIM_VALUE(i, stim ? stim_current : 0.0);
    }
}
SET_SPATIAL_STIM(stim_x_y_limits) {

    real_cpu max_x = 0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, max_x, config->config_data, "max_x");
    real_cpu min_x = 0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, min_x, config->config_data, "min_x");
    real_cpu max_y = 0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, max_y, config->config_data, "max_y");
    real_cpu min_y = 0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, min_y, config->config_data, "min_y");

    real stim_current = 0.0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, stim_current, config->config_data, "current");

    ALLOCATE_STIMS();

    // Stimulate cells inside the rectangle [min_x, max_x] x [min_y, max_y]
    // (z unrestricted). The loop index is uint32_t (not int) to match n_active
    // and the sibling stimulus functions, removing the signed/unsigned
    // comparison present in the original.
    #pragma omp parallel for
    for(uint32_t i = 0; i < n_active; i++) {
        bool stim = (ac[i]->center.x >= min_x) && (ac[i]->center.x <= max_x);
        stim &= (ac[i]->center.y >= min_y) && (ac[i]->center.y <= max_y);
        SET_STIM_VALUE(i, stim ? stim_current : 0.0);
    }
}
SET_SPATIAL_STIM(stim_x_y_z_limits) {

    real_cpu max_x = 0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, max_x, config->config_data, "max_x");
    real_cpu min_x = 0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, min_x, config->config_data, "min_x");
    real_cpu max_y = 0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, max_y, config->config_data, "max_y");
    real_cpu min_y = 0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, min_y, config->config_data, "min_y");
    real_cpu max_z = 0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, max_z, config->config_data, "max_z");
    real_cpu min_z = 0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, min_z, config->config_data, "min_z");

    real stim_current = 0.0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, stim_current, config->config_data, "current");

    ALLOCATE_STIMS();

    // Stimulate cells inside the axis-aligned box [min, max] in x, y and z.
    #pragma omp parallel for
    for(uint32_t i = 0; i < n_active; i++) {
        const bool in_x = (ac[i]->center.x >= min_x) && (ac[i]->center.x <= max_x);
        const bool in_y = (ac[i]->center.y >= min_y) && (ac[i]->center.y <= max_y);
        const bool in_z = (ac[i]->center.z >= min_z) && (ac[i]->center.z <= max_z);
        SET_STIM_VALUE(i, (in_x && in_y && in_z) ? stim_current : 0.0);
    }
}
// ***********************************************************************************************************
// New Berg's stimulus
SET_SPATIAL_STIM(stim_if_inside_circle_than) {
bool stim;
real stim_value;
real stim_current = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, stim_current, config->config_data, "current");
real_cpu center_x = 0.0;
real_cpu center_y = 0.0;
real_cpu center_z = 0.0;
real_cpu radius = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, center_x, config->config_data, "center_x");
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, center_y, config->config_data, "center_y");
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, center_z, config->config_data, "center_z");
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, radius, config->config_data, "radius");
uint32_t i;
ALLOCATE_STIMS();
#pragma omp parallel for private(stim, stim_value)
for(i = 0; i < n_active; i++) {
real_cpu dist = sqrt(pow(ac[i]->center.x - center_x, 2) + pow(ac[i]->center.y - center_y, 2) +
pow(ac[i]->center.z - center_z, 2));
stim = dist <= radius;
if(stim) {
stim_value = stim_current;
} else {
stim_value = 0.0;
}
SET_STIM_VALUE(i, stim_value);
}
}
SET_SPATIAL_STIM(stim_if_id_less_than) {

    real stim_current = 0.0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, stim_current, config->config_data, "current");

    int id = 0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(int, id, config->config_data, "id_limit");

    ALLOCATE_STIMS();

    // Cells whose index is at most id_limit receive the stimulus.
    // NOTE(review): the bound is inclusive (i <= id) despite the "less_than"
    // name — confirm the intended semantics before changing.
    #pragma omp parallel for
    for(int i = 0; i < n_active; i++) {
        SET_STIM_VALUE(i, (i <= id) ? stim_current : 0.0);
    }
}
SET_SPATIAL_STIM(stim_if_id_greater_than) {

    real stim_current = 0.0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, stim_current, config->config_data, "current");

    int id = 0;
    GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(int, id, config->config_data, "id_limit");

    ALLOCATE_STIMS();

    // Cells whose index is at least id_limit receive the stimulus.
    // NOTE(review): the bound is inclusive (i >= id) despite the "greater_than"
    // name — confirm the intended semantics before changing.
    #pragma omp parallel for
    for(int i = 0; i < n_active; i++) {
        SET_STIM_VALUE(i, (i >= id) ? stim_current : 0.0);
    }
}
SET_SPATIAL_STIM(stim_concave) {
real_cpu max_x_1 = 0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, max_x_1, config->config_data, "max_x_1");
real_cpu min_x_1 = 0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, min_x_1, config->config_data, "min_x_1");
real_cpu max_y_1 = 0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, max_y_1, config->config_data, "max_y_1");
real_cpu min_y_1 = 0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, min_y_1, config->config_data, "min_y_1");
real_cpu max_x_2 = 0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, max_x_2, config->config_data, "max_x_2");
real_cpu min_x_2 = 0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, min_x_2, config->config_data, "min_x_2");
real_cpu max_y_2 = 0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, max_y_2, config->config_data, "max_y_2");
real_cpu min_y_2 = 0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, min_y_2, config->config_data, "min_y_2");
real stim_current = 0.0;
real stim_value;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, stim_current, config->config_data, "current");
uint32_t i;
ALLOCATE_STIMS();
#pragma omp parallel for private(stim_value)
for(i = 0; i < n_active; i++) {
bool stim_1, stim_2;
// First corner
stim_1 = (ac[i]->center.x >= min_x_1) && (ac[i]->center.x <= max_x_1);
stim_1 &= (ac[i]->center.y >= min_y_1) && (ac[i]->center.y <= max_y_1);
// Second corner
stim_2 = (ac[i]->center.x >= min_x_2) && (ac[i]->center.x <= max_x_2);
stim_2 &= (ac[i]->center.y >= min_y_2) && (ac[i]->center.y <= max_y_2);
if(stim_1 || stim_2) {
stim_value = stim_current;
} else {
stim_value = 0.0;
}
SET_STIM_VALUE(i, stim_value);
}
}
/*
SET_SPATIAL_STIM(stim_purkinje_if_id_less_than)
{
bool stim;
real stim_value;
real stim_current = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, stim_current, config->config_data, "current");
int id = 0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(int, id, config->config_data, "id_limit");
int i;
ALLOCATE_STIMS();
#pragma omp parallel for private(stim, stim_value)
for(i = 0; i < n_active; i++)
{
stim = i <= id;
if(stim)
{
stim_value = stim_current;
}
else
{
stim_value = 0.0;
}
SET_STIM_VALUE(i, stim_value);
}
}
SET_SPATIAL_STIM(stim_purkinje_if_x_less_than)
{
// Total number of cells on the grid Purkinje + Tissue
uint32_t n_active_tissue = the_grid->num_active_cells;
uint32_t n_active_purkinje = the_grid->the_purkinje->num_active_purkinje_cells;
uint32_t n_active = n_active_purkinje + n_active_tissue;
struct cell_node **ac_purkinje = the_grid->the_purkinje->purkinje_cells;
struct cell_node **ac_tissue = the_grid->active_cells;
real stim_current = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, stim_current, config->config_data, "current");
bool stim;
real stim_value;
real_cpu x_limit = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, x_limit, config->config_data, "x_limit");
uint32_t i, j;
ALLOCATE_STIMS();
#pragma omp parallel for private(stim, stim_value)
for(i = 0; i < n_active_purkinje; i++)
{
stim = ac_purkinje[i]->center.x < x_limit;
if(stim)
{
stim_value = stim_current;
}
else
{
stim_value = 0.0;
}
SET_STIM_VALUE(i, stim_value);
}
i = n_active_purkinje;
for(j = 0; j < n_active_tissue; j++)
{
stim = ac_tissue[j]->center.x < x_limit;
if(stim)
{
stim_value = stim_current;
}
else
{
stim_value = 0.0;
}
SET_STIM_VALUE(i, stim_value);
i++;
}
}
SET_SPATIAL_STIM(stim_purkinje_if_x_greater_equal_than) {
// Total number of cells on the grid Purkinje + Tissue
uint32_t n_active_tissue = the_grid->num_active_cells;
uint32_t n_active_purkinje = the_grid->the_purkinje->num_active_purkinje_cells;
uint32_t n_active = n_active_purkinje + n_active_tissue;
struct cell_node **ac_purkinje = the_grid->the_purkinje->purkinje_cells;
struct cell_node **ac_tissue = the_grid->active_cells;
real stim_current = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, stim_current, config->config_data, "current");
bool stim;
real stim_value;
real_cpu x_limit = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real_cpu, x_limit, config->config_data, "x_limit");
uint32_t i, j;
ALLOCATE_STIMS();
#pragma omp parallel for private(stim, stim_value)
for(i = 0; i < n_active_tissue; i++)
{
stim = ac_tissue[i]->center.x >= x_limit;
if(stim)
{
stim_value = stim_current;
}
else
{
stim_value = 0.0;
}
SET_STIM_VALUE(i, stim_value);
}
i = n_active_purkinje;
for(j = 0; j < n_active_purkinje; j++)
{
stim = ac_purkinje[j]->center.x >= x_limit;
if(stim)
{
stim_value = stim_current;
}
else
{
stim_value = 0.0;
}
SET_STIM_VALUE(i, stim_value);
i++;
}
}
*/
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Scalar (non-vectorized) 3x3 stride-1 convolution.
// Each output channel is seeded with its bias, then every input channel's
// contribution is accumulated into it. Two output rows are produced per
// outer iteration so the three loaded input rows can be reused across rows.
// NOTE(review): the row advance `r0 += 2 + w` assumes the bottom blob is
// padded so that w == outw + 2 — confirm at the call site.
static void conv3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
// one output channel per OpenMP task
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
// seed the whole channel with the bias (0 when no bias tensor was given)
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
// accumulate the contribution of every input channel
for (int q=0; q<inch; q++)
{
float* outptr = out;
float* outptr2 = outptr + outw;            // second output row of the pair
const float* img0 = bottom_blob.channel(q);
// 9 kernel taps for this (output, input) channel pair, laid out row-major
const float* kernel0 = kernel + p*inch*9 + q*9;
// four consecutive input rows: rows 0-2 feed output row i,
// rows 1-3 feed output row i+1
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
int i = 0;
// main loop: two output rows at a time
for (; i+1 < outh; i+=2)
{
int remain = outw;
for (; remain>0; remain--)
{
float sum = 0;
float sum2 = 0;
// output row i: rows r0..r2 against kernel rows k0..k2
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
// output row i+1: rows r1..r3 against the same kernel rows
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
// accumulate (output already holds bias + previous channels)
*outptr += sum;
*outptr2 += sum2;
r0++;
r1++;
r2++;
r3++;
outptr++;
outptr2++;
}
// skip the 2-pixel border and the already-consumed second row
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
// jump over the row written through outptr2
outptr += outw;
outptr2 += outw;
}
// tail: one remaining output row (odd outh)
for (; i < outh; i++)
{
int remain = outw;
for (; remain>0; remain--)
{
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr += sum;
r0++;
r1++;
r2++;
outptr++;
}
// skip the 2-pixel right border to reach the next input row
r0 += 2;
r1 += 2;
r2 += 2;
}
}
}
}
// Precompute Winograd F(2,3) transformed kernels: U = G * g * G^T for every
// (output channel, input channel) pair of 3x3 kernels. Each transformed
// kernel is a 4x4 tile stored as 16 consecutive floats in kernel_tm.
static void conv3x3s1_winograd23_transform_kernel_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(4*4, inch, outch);

    // G matrix for F(2,3)
    const float ktm[4][3] = {
        { 1.0f, 0.0f, 0.0f},
        { 1.0f/2, 1.0f/2, 1.0f/2},
        { 1.0f/2, -1.0f/2, 1.0f/2},
        { 0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* g = (const float*)kernel + (p * inch + q) * 9;
            float* u = kernel_tm.channel(p).row(q);

            // h = G * g : expand the three 3-tap kernel rows to four
            float h[4][3];
            for (int i = 0; i < 4; i++)
            {
                for (int c = 0; c < 3; c++)
                {
                    const float* gr = g + c * 3;
                    h[i][c] = gr[0] * ktm[i][0] + gr[1] * ktm[i][1] + gr[2] * ktm[i][2];
                }
            }

            // U = h * G^T : expand the columns; write the 4x4 tile row-major
            for (int j = 0; j < 4; j++)
            {
                for (int i = 0; i < 4; i++)
                {
                    u[j * 4 + i] = h[j][0] * ktm[i][0] + h[j][1] * ktm[i][1] + h[j][2] * ktm[i][2];
                }
            }
        }
    }
}
// Winograd F(2,3) 3x3 stride-1 convolution, three stages:
//   1) transform each padded 4x4 input tile (B^T d B) into bottom_blob_tm,
//   2) element-wise dot of transformed tiles against pre-transformed kernels
//      (see conv3x3s1_winograd23_transform_kernel_sse), 4 output channels at
//      a time with an AVX path and a scalar fallback,
//   3) inverse-transform (A^T w A) each 4x4 result tile into a 2x2 output
//      patch, adding the bias, then crop the rounding pad.
static void conv3x3s1_winograd23_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 2n+2, winograd F(2,3)
// (output rounded up to a multiple of 2; input needs 2 extra border pixels)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 1) / 2 * 2;
outh = (outh + 1) / 2 * 2;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm/4; // may be the block num in Feathercnn
int nRowBlocks = w_tm/4;
const int tiles = nColBlocks * nRowBlocks;
// one 16-float transformed tile per (tile, input channel)
bottom_blob_tm.create(4*4, tiles, inch, 4u, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
// {1.0f, 0.0f, -1.0f, 0.0f},
// {0.0f, 1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 0.00f, 1.0f}
// };
#pragma omp parallel for num_threads(opt.num_threads)
for (int q=0; q<inch; q++)
{
const float* img = bottom_blob_bordered.channel(q);
float* out_tm0 = bottom_blob_tm.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
// four input rows feeding this row of 4x4 tiles (tiles overlap by 2)
const float* r0 = img + w * j * 2;
const float* r1 = r0 + w;
const float* r2 = r1 + w;
const float* r3 = r2 + w;
for (int i = 0; i < nRowBlocks; i++)
{
#if __AVX__
__m128 _d0, _d1, _d2, _d3;
__m128 _w0, _w1, _w2, _w3;
// load
_d0 = _mm_loadu_ps(r0);
_d1 = _mm_loadu_ps(r1);
_d2 = _mm_loadu_ps(r2);
_d3 = _mm_loadu_ps(r3);
// w = B_t * d
_w0 = _mm_sub_ps(_d0, _d2);
_w1 = _mm_add_ps(_d1, _d2);
_w2 = _mm_sub_ps(_d2, _d1);
_w3 = _mm_sub_ps(_d3, _d1);
// transpose d to d_t
_MM_TRANSPOSE4_PS(_w0, _w1, _w2, _w3);
// d = B_t * d_t  (apply B^T to the other dimension)
_d0 = _mm_sub_ps(_w0, _w2);
_d1 = _mm_add_ps(_w1, _w2);
_d2 = _mm_sub_ps(_w2, _w1);
_d3 = _mm_sub_ps(_w3, _w1);
// save to out_tm
_mm_storeu_ps(out_tm0, _d0);
_mm_storeu_ps(out_tm0+4, _d1);
_mm_storeu_ps(out_tm0+8, _d2);
_mm_storeu_ps(out_tm0+12, _d3);
#else
// scalar fallback: same B^T d B transform written out element-wise
float d0[4],d1[4],d2[4],d3[4];
float w0[4],w1[4],w2[4],w3[4];
float t0[4],t1[4],t2[4],t3[4];
// load
for (int n = 0; n < 4; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
}
// w = B_t * d
for (int n = 0; n < 4; n++)
{
w0[n] = d0[n] - d2[n];
w1[n] = d1[n] + d2[n];
w2[n] = d2[n] - d1[n];
w3[n] = d3[n] - d1[n];
}
// transpose d to d_t
{
t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3];
t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3];
t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3];
t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3];
}
// d = B_t * d_t
for (int n = 0; n < 4; n++)
{
d0[n] = t0[n] - t2[n];
d1[n] = t1[n] + t2[n];
d2[n] = t2[n] - t1[n];
d3[n] = t3[n] - t1[n];
}
// save to out_tm
for (int n = 0; n < 4; n++)
{
out_tm0[n ] = d0[n];
out_tm0[n+ 4] = d1[n];
out_tm0[n+ 8] = d2[n];
out_tm0[n+12] = d3[n];
}
#endif
// step to the next tile: 2-pixel stride in the input, 16 floats in out_tm
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
out_tm0 += 16;
}
}
}
}
// release the padded copy early; only the transformed data is needed now
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm/4; // may be the block num in Feathercnn
int nRowBlocks = w_tm/4;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator);
// process output channels in groups of 4, remainder handled below
int nn_outch = outch >> 2;
int remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 4;
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p+1);
Mat out2_tm = top_blob_tm.channel(p+2);
Mat out3_tm = top_blob_tm.channel(p+3);
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p+1);
const Mat kernel2_tm = kernel_tm.channel(p+2);
const Mat kernel3_tm = kernel_tm.channel(p+3);
for (int i=0; i<tiles; i++)
{
float* output0_tm = out0_tm.row(i);
float* output1_tm = out1_tm.row(i);
float* output2_tm = out2_tm.row(i);
float* output3_tm = out3_tm.row(i);
#if __AVX__
// zero the 16-float accumulators (two 8-lane registers per channel)
float zero_val = 0.f;
__m256 _sum0 = _mm256_broadcast_ss(&zero_val);
__m256 _sum0n = _mm256_broadcast_ss(&zero_val);
__m256 _sum1 = _mm256_broadcast_ss(&zero_val);
__m256 _sum1n = _mm256_broadcast_ss(&zero_val);
__m256 _sum2 = _mm256_broadcast_ss(&zero_val);
__m256 _sum2n = _mm256_broadcast_ss(&zero_val);
__m256 _sum3 = _mm256_broadcast_ss(&zero_val);
__m256 _sum3n = _mm256_broadcast_ss(&zero_val);
int q = 0;
// unrolled over 4 input channels; kernel rows are 16 floats apart
for (; q+3<inch; q+=4)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* r1 = bottom_blob_tm.channel(q+1).row(i);
const float* r2 = bottom_blob_tm.channel(q+2).row(i);
const float* r3 = bottom_blob_tm.channel(q+3).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel1_tm.row(q);
const float* k2 = kernel2_tm.row(q);
const float* k3 = kernel3_tm.row(q);
__m256 _r0 = _mm256_loadu_ps(r0);
__m256 _r0n = _mm256_loadu_ps(r0+8);
// k0
__m256 _k0 = _mm256_loadu_ps(k0);
__m256 _k0n = _mm256_loadu_ps(k0+8);
__m256 _k1 = _mm256_loadu_ps(k1);
__m256 _k1n = _mm256_loadu_ps(k1+8);
__m256 _k2 = _mm256_loadu_ps(k2);
__m256 _k2n = _mm256_loadu_ps(k2+8);
__m256 _k3 = _mm256_loadu_ps(k3);
__m256 _k3n = _mm256_loadu_ps(k3+8);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
// k1  (input channel q+1; kernel offset +16)
_r0 = _mm256_loadu_ps(r1);
_r0n = _mm256_loadu_ps(r1+8);
_k0 = _mm256_loadu_ps(k0+16);
_k0n = _mm256_loadu_ps(k0+24);
_k1 = _mm256_loadu_ps(k1+16);
_k1n = _mm256_loadu_ps(k1+24);
_k2 = _mm256_loadu_ps(k2+16);
_k2n = _mm256_loadu_ps(k2+24);
_k3 = _mm256_loadu_ps(k3+16);
_k3n = _mm256_loadu_ps(k3+24);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0)
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
// k2  (input channel q+2; kernel offset +32)
_r0 = _mm256_loadu_ps(r2);
_r0n = _mm256_loadu_ps(r2+8);
_k0 = _mm256_loadu_ps(k0+32);
_k0n = _mm256_loadu_ps(k0+40);
_k1 = _mm256_loadu_ps(k1+32);
_k1n = _mm256_loadu_ps(k1+40);
_k2 = _mm256_loadu_ps(k2+32);
_k2n = _mm256_loadu_ps(k2+40);
_k3 = _mm256_loadu_ps(k3+32);
_k3n = _mm256_loadu_ps(k3+40);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
// k3  (input channel q+3; kernel offset +48)
_r0 = _mm256_loadu_ps(r3);
_r0n = _mm256_loadu_ps(r3+8);
_k0 = _mm256_loadu_ps(k0+48);
_k0n = _mm256_loadu_ps(k0+56);
_k1 = _mm256_loadu_ps(k1+48);
_k1n = _mm256_loadu_ps(k1+56);
_k2 = _mm256_loadu_ps(k2+48);
_k2n = _mm256_loadu_ps(k2+56);
_k3 = _mm256_loadu_ps(k3+48);
_k3n = _mm256_loadu_ps(k3+56);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
}
// remaining input channels one at a time
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel1_tm.row(q);
const float* k2 = kernel2_tm.row(q);
const float* k3 = kernel3_tm.row(q);
__m256 _r0 = _mm256_loadu_ps(r0);
__m256 _r0n = _mm256_loadu_ps(r0+8);
__m256 _k0 = _mm256_loadu_ps(k0);
__m256 _k0n = _mm256_loadu_ps(k0+8);
__m256 _k1 = _mm256_loadu_ps(k1);
__m256 _k1n = _mm256_loadu_ps(k1+8);
__m256 _k2 = _mm256_loadu_ps(k2);
__m256 _k2n = _mm256_loadu_ps(k2+8);
__m256 _k3 = _mm256_loadu_ps(k3);
__m256 _k3n = _mm256_loadu_ps(k3+8);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
}
_mm256_storeu_ps(output0_tm, _sum0);
_mm256_storeu_ps(output0_tm+8, _sum0n);
_mm256_storeu_ps(output1_tm, _sum1);
_mm256_storeu_ps(output1_tm+8, _sum1n);
_mm256_storeu_ps(output2_tm, _sum2);
_mm256_storeu_ps(output2_tm+8, _sum2n);
_mm256_storeu_ps(output3_tm, _sum3);
_mm256_storeu_ps(output3_tm+8, _sum3n);
#else
// scalar fallback: same 16-element dot per channel group
float sum0[16] = {0.0f};
float sum1[16] = {0.0f};
float sum2[16] = {0.0f};
float sum3[16] = {0.0f};
int q = 0;
for (; q+3<inch; q+=4)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* r1 = bottom_blob_tm.channel(q+1).row(i);
const float* r2 = bottom_blob_tm.channel(q+2).row(i);
const float* r3 = bottom_blob_tm.channel(q+3).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel1_tm.row(q);
const float* k2 = kernel2_tm.row(q);
const float* k3 = kernel3_tm.row(q);
// k pointers walk forward/back by 16 to reach rows q+1..q+3
for (int n=0; n<16; n++)
{
sum0[n] += r0[n] * k0[n];
k0 += 16;
sum0[n] += r1[n] * k0[n];
k0 += 16;
sum0[n] += r2[n] * k0[n];
k0 += 16;
sum0[n] += r3[n] * k0[n];
k0 -= 16 * 3;
sum1[n] += r0[n] * k1[n];
k1 += 16;
sum1[n] += r1[n] * k1[n];
k1 += 16;
sum1[n] += r2[n] * k1[n];
k1 += 16;
sum1[n] += r3[n] * k1[n];
k1 -= 16 * 3;
sum2[n] += r0[n] * k2[n];
k2 += 16;
sum2[n] += r1[n] * k2[n];
k2 += 16;
sum2[n] += r2[n] * k2[n];
k2 += 16;
sum2[n] += r3[n] * k2[n];
k2 -= 16 * 3;
sum3[n] += r0[n] * k3[n];
k3 += 16;
sum3[n] += r1[n] * k3[n];
k3 += 16;
sum3[n] += r2[n] * k3[n];
k3 += 16;
sum3[n] += r3[n] * k3[n];
k3 -= 16 * 3;
}
}
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel1_tm.row(q);
const float* k2 = kernel2_tm.row(q);
const float* k3 = kernel3_tm.row(q);
for (int n=0; n<16; n++)
{
sum0[n] += r0[n] * k0[n];
sum1[n] += r0[n] * k1[n];
sum2[n] += r0[n] * k2[n];
sum3[n] += r0[n] * k3[n];
}
}
for (int n=0; n<16; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
#endif
}
}
// leftover output channels (outch not a multiple of 4), scalar only
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int i=0; i<tiles; i++)
{
float* output0_tm = out0_tm.row(i);
float sum0[16] = {0.0f};
int q = 0;
for (; q+3<inch; q+=4)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* r1 = bottom_blob_tm.channel(q+1).row(i);
const float* r2 = bottom_blob_tm.channel(q+2).row(i);
const float* r3 = bottom_blob_tm.channel(q+3).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel0_tm.row(q+1);
const float* k2 = kernel0_tm.row(q+2);
const float* k3 = kernel0_tm.row(q+3);
for (int n=0; n<16; n++)
{
sum0[n] += r0[n] * k0[n];
sum0[n] += r1[n] * k1[n];
sum0[n] += r2[n] * k2[n];
sum0[n] += r3[n] * k3[n];
}
}
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* k0 = kernel0_tm.row(q);
for (int n=0; n<16; n++)
{
sum0[n] += r0[n] * k0[n];
}
}
for (int n=0; n<16; n++)
{
output0_tm[n] = sum0[n];
}
}
}
}
// free the transformed input before the output transform
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// AT
// const float itm[2][4] = {
// {1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 1.0f}
// };
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm/4; // may be the block num in Feathercnn
int nRowBlocks = w_tm/4;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out_tm = top_blob_tm.channel(p);
Mat out = top_blob_bordered.channel(p);
// bias is folded in here, once per output element
const float bias0 = bias ? bias[p] : 0.f;
for (int j=0; j<nColBlocks; j++)
{
// each 4x4 tile yields a 2x2 output patch spanning two output rows
float* outRow0 = out.row(j*2);
float* outRow1 = out.row(j*2+1);
for(int i=0; i<nRowBlocks; i++)
{
float* out_tile = out_tm.row(j*nRowBlocks + i);
float s0[4],s1[4],s2[4],s3[4];
float w0[4],w1[4];
float d0[2],d1[2],d2[2],d3[2];
float o0[2],o1[2];
// load
for (int n = 0; n < 4; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n+ 4];
s2[n] = out_tile[n+ 8];
s3[n] = out_tile[n+12];
}
// w = A_T * W
for (int n = 0; n < 4; n++)
{
w0[n] = s0[n] + s1[n] + s2[n];
w1[n] = s1[n] - s2[n] + s3[n];
}
// transpose w to w_t
{
d0[0] = w0[0]; d0[1] = w1[0];
d1[0] = w0[1]; d1[1] = w1[1];
d2[0] = w0[2]; d2[1] = w1[2];
d3[0] = w0[3]; d3[1] = w1[3];
}
// Y = A_T * w_t
for (int n = 0; n < 2; n++)
{
o0[n] = d0[n] + d1[n] + d2[n] + bias0;
o1[n] = d1[n] - d2[n] + d3[n] + bias0;
}
// save to top blob tm
outRow0[0] = o0[0];
outRow0[1] = o0[1];
outRow1[0] = o1[0];
outRow1[1] = o1[1];
outRow0 += 2;
outRow1 += 2;
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads);
}
// Precompute Winograd F(4,3) transformed kernels (U = G * g * G^T, a 6x6 =
// 36-float tile per (outch, inch) pair), then repack them into 9 interleaved
// Mats — one per group r of 4 consecutive transformed values (9*4 = 36) —
// with output channels blocked by 8, then 4, then 1 so the dot stage can
// stream them with SSE/AVX loads.
static void conv3x3s1_winograd43_transform_kernel_sse(const Mat& kernel, std::vector<Mat> &kernel_tm2, int inch, int outch)
{
Mat kernel_tm(6*6, inch, outch);
// G
const float ktm[6][3] = {
{ 1.0f/4, 0.0f, 0.0f},
{ -1.0f/6, -1.0f/6, -1.0f/6},
{ -1.0f/6, 1.0f/6, -1.0f/6},
{ 1.0f/24, 1.0f/12, 1.0f/6},
{ 1.0f/24, -1.0f/12, 1.0f/6},
{ 0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for
for (int p = 0; p<outch; p++)
{
for (int q = 0; q<inch; q++)
{
const float* kernel0 = (const float*)kernel + p*inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h = G * g : expand the three kernel rows to six
float tmp[6][3];
for (int i=0; i<6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U = h * G^T : 6x6 tile, written row-major
for (int j=0; j<6; j++)
{
float* tmpp = &tmp[j][0];
for (int i=0; i<6; i++)
{
kernel_tm0[j*6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// repack: for each group r of 4 transformed values, interleave kernels of
// 8 (then 4, then 1) output channels per destination channel
for (int r=0; r<9; r++)
{
// channel count = number of 8-blocks + 4-blocks + single leftovers
Mat kernel_tm_test(4*8, inch, outch/8 + (outch%8)/4 + outch%4);
int p = 0;
for (; p+7<outch; p+=8)
{
const float* kernel0 = (const float*)kernel_tm.channel(p);
const float* kernel1 = (const float*)kernel_tm.channel(p+1);
const float* kernel2 = (const float*)kernel_tm.channel(p+2);
const float* kernel3 = (const float*)kernel_tm.channel(p+3);
const float* kernel4 = (const float*)kernel_tm.channel(p+4);
const float* kernel5 = (const float*)kernel_tm.channel(p+5);
const float* kernel6 = (const float*)kernel_tm.channel(p+6);
const float* kernel7 = (const float*)kernel_tm.channel(p+7);
float* ktmp = kernel_tm_test.channel(p/8);
for (int q=0; q<inch; q++)
{
// 4 values from each of the 8 channels -> 32 floats per input channel
ktmp[0] = kernel0[r*4+0];
ktmp[1] = kernel0[r*4+1];
ktmp[2] = kernel0[r*4+2];
ktmp[3] = kernel0[r*4+3];
ktmp[4] = kernel1[r*4+0];
ktmp[5] = kernel1[r*4+1];
ktmp[6] = kernel1[r*4+2];
ktmp[7] = kernel1[r*4+3];
ktmp[8] = kernel2[r*4+0];
ktmp[9] = kernel2[r*4+1];
ktmp[10] = kernel2[r*4+2];
ktmp[11] = kernel2[r*4+3];
ktmp[12] = kernel3[r*4+0];
ktmp[13] = kernel3[r*4+1];
ktmp[14] = kernel3[r*4+2];
ktmp[15] = kernel3[r*4+3];
ktmp[16] = kernel4[r*4+0];
ktmp[17] = kernel4[r*4+1];
ktmp[18] = kernel4[r*4+2];
ktmp[19] = kernel4[r*4+3];
ktmp[20] = kernel5[r*4+0];
ktmp[21] = kernel5[r*4+1];
ktmp[22] = kernel5[r*4+2];
ktmp[23] = kernel5[r*4+3];
ktmp[24] = kernel6[r*4+0];
ktmp[25] = kernel6[r*4+1];
ktmp[26] = kernel6[r*4+2];
ktmp[27] = kernel6[r*4+3];
ktmp[28] = kernel7[r*4+0];
ktmp[29] = kernel7[r*4+1];
ktmp[30] = kernel7[r*4+2];
ktmp[31] = kernel7[r*4+3];
ktmp += 32;
// advance to the next input channel's 36-float tile
kernel0 += 36;
kernel1 += 36;
kernel2 += 36;
kernel3 += 36;
kernel4 += 36;
kernel5 += 36;
kernel6 += 36;
kernel7 += 36;
}
}
// remainder: blocks of 4 output channels
for (; p+3<outch; p+=4)
{
const float* kernel0 = (const float*)kernel_tm.channel(p);
const float* kernel1 = (const float*)kernel_tm.channel(p+1);
const float* kernel2 = (const float*)kernel_tm.channel(p+2);
const float* kernel3 = (const float*)kernel_tm.channel(p+3);
float* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4);
for (int q=0; q<inch; q++)
{
ktmp[0] = kernel0[r*4+0];
ktmp[1] = kernel0[r*4+1];
ktmp[2] = kernel0[r*4+2];
ktmp[3] = kernel0[r*4+3];
ktmp[4] = kernel1[r*4+0];
ktmp[5] = kernel1[r*4+1];
ktmp[6] = kernel1[r*4+2];
ktmp[7] = kernel1[r*4+3];
ktmp[8] = kernel2[r*4+0];
ktmp[9] = kernel2[r*4+1];
ktmp[10] = kernel2[r*4+2];
ktmp[11] = kernel2[r*4+3];
ktmp[12] = kernel3[r*4+0];
ktmp[13] = kernel3[r*4+1];
ktmp[14] = kernel3[r*4+2];
ktmp[15] = kernel3[r*4+3];
ktmp += 16;
kernel0 += 36;
kernel1 += 36;
kernel2 += 36;
kernel3 += 36;
}
}
// remainder: single output channels
for (; p<outch; p++)
{
const float* kernel0 = (const float*)kernel_tm.channel(p);
float* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4 + p%4);
for (int q=0; q<inch; q++)
{
ktmp[0] = kernel0[r*4+0];
ktmp[1] = kernel0[r*4+1];
ktmp[2] = kernel0[r*4+2];
ktmp[3] = kernel0[r*4+3];
ktmp += 4;
kernel0 += 36;
}
}
kernel_tm2.push_back(kernel_tm_test);
}
}
static void conv3x3s1_winograd43_sse(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat> &kernel_tm_test, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
const float* bias = _bias;
// pad to 4n+2, winograd F(4,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm/6; // may be the block num in Feathercnn
int nRowBlocks = w_tm/6;
const int tiles = nColBlocks * nRowBlocks;
bottom_blob_tm.create(4, inch, tiles*9, elemsize, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
#if __AVX__
__m256 _1_n = _mm256_set1_ps(-1);
__m256 _2_p = _mm256_set1_ps(2);
__m256 _2_n = _mm256_set1_ps(-2);
__m256 _4_p = _mm256_set1_ps(4);
__m256 _4_n = _mm256_set1_ps(-4);
__m256 _5_n = _mm256_set1_ps(-5);
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int q=0; q<inch; q++)
{
const float* img = bottom_blob_bordered.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
const float* r0 = img + w * j * 4;
const float* r1 = r0 + w;
const float* r2 = r1 + w;
const float* r3 = r2 + w;
const float* r4 = r3 + w;
const float* r5 = r4 + w;
for (int i = 0; i < nRowBlocks; i++)
{
float* out_tm0 = bottom_blob_tm.channel(tiles*0+j*nRowBlocks+i).row(q);
float* out_tm1 = bottom_blob_tm.channel(tiles*1+j*nRowBlocks+i).row(q);
float* out_tm2 = bottom_blob_tm.channel(tiles*2+j*nRowBlocks+i).row(q);
float* out_tm3 = bottom_blob_tm.channel(tiles*3+j*nRowBlocks+i).row(q);
float* out_tm4 = bottom_blob_tm.channel(tiles*4+j*nRowBlocks+i).row(q);
float* out_tm5 = bottom_blob_tm.channel(tiles*5+j*nRowBlocks+i).row(q);
float* out_tm6 = bottom_blob_tm.channel(tiles*6+j*nRowBlocks+i).row(q);
float* out_tm7 = bottom_blob_tm.channel(tiles*7+j*nRowBlocks+i).row(q);
float* out_tm8 = bottom_blob_tm.channel(tiles*8+j*nRowBlocks+i).row(q);
#if __AVX__
__m256 _d0, _d1, _d2, _d3, _d4, _d5;
__m256 _w0, _w1, _w2, _w3, _w4, _w5;
__m256 _t0, _t1, _t2, _t3, _t4, _t5;
__m256 _n0, _n1, _n2, _n3, _n4, _n5;
// load
_d0 = _mm256_loadu_ps(r0);
_d1 = _mm256_loadu_ps(r1);
_d2 = _mm256_loadu_ps(r2);
_d3 = _mm256_loadu_ps(r3);
_d4 = _mm256_loadu_ps(r4);
_d5 = _mm256_loadu_ps(r5);
// w = B_t * d
_w0 = _mm256_mul_ps(_d0, _4_p);
_w0 = _mm256_fmadd_ps(_d2, _5_n, _w0);
_w0 = _mm256_add_ps(_w0, _d4);
_w1 = _mm256_mul_ps(_d1, _4_n);
_w1 = _mm256_fmadd_ps(_d2, _4_n, _w1);
_w1 = _mm256_add_ps(_w1, _d3);
_w1 = _mm256_add_ps(_w1, _d4);
_w2 = _mm256_mul_ps(_d1, _4_p);
_w2 = _mm256_fmadd_ps(_d2, _4_n, _w2);
_w2 = _mm256_fmadd_ps(_d3, _1_n, _w2);
_w2 = _mm256_add_ps(_w2, _d4);
_w3 = _mm256_mul_ps(_d1, _2_n);
_w3 = _mm256_fmadd_ps(_d2, _1_n, _w3);
_w3 = _mm256_fmadd_ps(_d3, _2_p, _w3);
_w3 = _mm256_add_ps(_w3, _d4);
_w4 = _mm256_mul_ps(_d1, _2_p);
_w4 = _mm256_fmadd_ps(_d2, _1_n, _w4);
_w4 = _mm256_fmadd_ps(_d3, _2_n, _w4);
_w4 = _mm256_add_ps(_w4, _d4);
_w5 = _mm256_mul_ps(_d1, _4_p);
_w5 = _mm256_fmadd_ps(_d3, _5_n, _w5);
_w5 = _mm256_add_ps(_w5, _d5);
// transpose d to d_t
{
_t0[0]=_w0[0]; _t1[0]=_w0[1]; _t2[0]=_w0[2]; _t3[0]=_w0[3]; _t4[0]=_w0[4]; _t5[0]=_w0[5];
_t0[1]=_w1[0]; _t1[1]=_w1[1]; _t2[1]=_w1[2]; _t3[1]=_w1[3]; _t4[1]=_w1[4]; _t5[1]=_w1[5];
_t0[2]=_w2[0]; _t1[2]=_w2[1]; _t2[2]=_w2[2]; _t3[2]=_w2[3]; _t4[2]=_w2[4]; _t5[2]=_w2[5];
_t0[3]=_w3[0]; _t1[3]=_w3[1]; _t2[3]=_w3[2]; _t3[3]=_w3[3]; _t4[3]=_w3[4]; _t5[3]=_w3[5];
_t0[4]=_w4[0]; _t1[4]=_w4[1]; _t2[4]=_w4[2]; _t3[4]=_w4[3]; _t4[4]=_w4[4]; _t5[4]=_w4[5];
_t0[5]=_w5[0]; _t1[5]=_w5[1]; _t2[5]=_w5[2]; _t3[5]=_w5[3]; _t4[5]=_w5[4]; _t5[5]=_w5[5];
}
// d = B_t * d_t
_n0 = _mm256_mul_ps(_t0, _4_p);
_n0 = _mm256_fmadd_ps(_t2, _5_n, _n0);
_n0 = _mm256_add_ps(_n0, _t4);
_n1 = _mm256_mul_ps(_t1, _4_n);
_n1 = _mm256_fmadd_ps(_t2, _4_n, _n1);
_n1 = _mm256_add_ps(_n1, _t3);
_n1 = _mm256_add_ps(_n1, _t4);
_n2 = _mm256_mul_ps(_t1, _4_p);
_n2 = _mm256_fmadd_ps(_t2, _4_n, _n2);
_n2 = _mm256_fmadd_ps(_t3, _1_n, _n2);
_n2 = _mm256_add_ps(_n2, _t4);
_n3 = _mm256_mul_ps(_t1, _2_n);
_n3 = _mm256_fmadd_ps(_t2, _1_n, _n3);
_n3 = _mm256_fmadd_ps(_t3, _2_p, _n3);
_n3 = _mm256_add_ps(_n3, _t4);
_n4 = _mm256_mul_ps(_t1, _2_p);
_n4 = _mm256_fmadd_ps(_t2, _1_n, _n4);
_n4 = _mm256_fmadd_ps(_t3, _2_n, _n4);
_n4 = _mm256_add_ps(_n4, _t4);
_n5 = _mm256_mul_ps(_t1, _4_p);
_n5 = _mm256_fmadd_ps(_t3, _5_n, _n5);
_n5 = _mm256_add_ps(_n5, _t5);
// save to out_tm
out_tm0[0]=_n0[0];out_tm0[1]=_n0[1];out_tm0[2]=_n0[2];out_tm0[3]=_n0[3];
out_tm1[0]=_n0[4];out_tm1[1]=_n0[5];out_tm1[2]=_n1[0];out_tm1[3]=_n1[1];
out_tm2[0]=_n1[2];out_tm2[1]=_n1[3];out_tm2[2]=_n1[4];out_tm2[3]=_n1[5];
out_tm3[0]=_n2[0];out_tm3[1]=_n2[1];out_tm3[2]=_n2[2];out_tm3[3]=_n2[3];
out_tm4[0]=_n2[4];out_tm4[1]=_n2[5];out_tm4[2]=_n3[0];out_tm4[3]=_n3[1];
out_tm5[0]=_n3[2];out_tm5[1]=_n3[3];out_tm5[2]=_n3[4];out_tm5[3]=_n3[5];
out_tm6[0]=_n4[0];out_tm6[1]=_n4[1];out_tm6[2]=_n4[2];out_tm6[3]=_n4[3];
out_tm7[0]=_n4[4];out_tm7[1]=_n4[5];out_tm7[2]=_n5[0];out_tm7[3]=_n5[1];
out_tm8[0]=_n5[2];out_tm8[1]=_n5[3];out_tm8[2]=_n5[4];out_tm8[3]=_n5[5];
#else
float d0[6],d1[6],d2[6],d3[6],d4[6],d5[6];
float w0[6],w1[6],w2[6],w3[6],w4[6],w5[6];
float t0[6],t1[6],t2[6],t3[6],t4[6],t5[6];
// load
for (int n = 0; n < 6; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
d4[n] = r4[n];
d5[n] = r5[n];
}
// w = B_t * d
for (int n = 0; n < 6; n++)
{
w0[n] = 4*d0[n] - 5*d2[n] + d4[n];
w1[n] = -4*d1[n] - 4*d2[n] + d3[n] + d4[n];
w2[n] = 4*d1[n] - 4*d2[n] - d3[n] + d4[n];
w3[n] = -2*d1[n] - d2[n] + 2*d3[n] + d4[n];
w4[n] = 2*d1[n] - d2[n] - 2*d3[n] + d4[n];
w5[n] = 4*d1[n] - 5*d3[n] + d5[n];
}
// transpose d to d_t
{
t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3]; t4[0]=w0[4]; t5[0]=w0[5];
t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3]; t4[1]=w1[4]; t5[1]=w1[5];
t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3]; t4[2]=w2[4]; t5[2]=w2[5];
t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3]; t4[3]=w3[4]; t5[3]=w3[5];
t0[4]=w4[0]; t1[4]=w4[1]; t2[4]=w4[2]; t3[4]=w4[3]; t4[4]=w4[4]; t5[4]=w4[5];
t0[5]=w5[0]; t1[5]=w5[1]; t2[5]=w5[2]; t3[5]=w5[3]; t4[5]=w5[4]; t5[5]=w5[5];
}
// d = B_t * d_t
for (int n = 0; n < 6; n++)
{
d0[n] = 4*t0[n] - 5*t2[n] + t4[n];
d1[n] = - 4*t1[n] - 4*t2[n] + t3[n] + t4[n];
d2[n] = 4*t1[n] - 4*t2[n] - t3[n] + t4[n];
d3[n] = - 2*t1[n] - t2[n] + 2*t3[n] + t4[n];
d4[n] = 2*t1[n] - t2[n] - 2*t3[n] + t4[n];
d5[n] = 4*t1[n] - 5*t3[n] + t5[n];
}
// save to out_tm
{
out_tm0[0]=d0[0];out_tm0[1]=d0[1];out_tm0[2]=d0[2];out_tm0[3]=d0[3];
out_tm1[0]=d0[4];out_tm1[1]=d0[5];out_tm1[2]=d1[0];out_tm1[3]=d1[1];
out_tm2[0]=d1[2];out_tm2[1]=d1[3];out_tm2[2]=d1[4];out_tm2[3]=d1[5];
out_tm3[0]=d2[0];out_tm3[1]=d2[1];out_tm3[2]=d2[2];out_tm3[3]=d2[3];
out_tm4[0]=d2[4];out_tm4[1]=d2[5];out_tm4[2]=d3[0];out_tm4[3]=d3[1];
out_tm5[0]=d3[2];out_tm5[1]=d3[3];out_tm5[2]=d3[4];out_tm5[3]=d3[5];
out_tm6[0]=d4[0];out_tm6[1]=d4[1];out_tm6[2]=d4[2];out_tm6[3]=d4[3];
out_tm7[0]=d4[4];out_tm7[1]=d4[5];out_tm7[2]=d5[0];out_tm7[3]=d5[1];
out_tm8[0]=d5[2];out_tm8[1]=d5[3];out_tm8[2]=d5[4];out_tm8[3]=d5[5];
}
#endif // __AVX__
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
}
}
}
}
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm/6; // may be the block num in Feathercnn
int nRowBlocks = w_tm/6;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(36, tiles, outch, elemsize, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r=0; r<9; r++)
{
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 8;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p+1);
float* output2_tm = top_blob_tm.channel(p+2);
float* output3_tm = top_blob_tm.channel(p+3);
float* output4_tm = top_blob_tm.channel(p+4);
float* output5_tm = top_blob_tm.channel(p+5);
float* output6_tm = top_blob_tm.channel(p+6);
float* output7_tm = top_blob_tm.channel(p+7);
output0_tm = output0_tm + r*4;
output1_tm = output1_tm + r*4;
output2_tm = output2_tm + r*4;
output3_tm = output3_tm + r*4;
output4_tm = output4_tm + r*4;
output5_tm = output5_tm + r*4;
output6_tm = output6_tm + r*4;
output7_tm = output7_tm + r*4;
for (int i=0; i<tiles; i++)
{
const float* kptr = kernel_tm_test[r].channel(p/8);
const float* r0 = bottom_blob_tm.channel(tiles*r+i);
#if __AVX__ || __SSE__
#if __AVX__
float zero_val = 0.f;
__m128 _sum0 = _mm_broadcast_ss(&zero_val);
__m128 _sum1 = _mm_broadcast_ss(&zero_val);
__m128 _sum2 = _mm_broadcast_ss(&zero_val);
__m128 _sum3 = _mm_broadcast_ss(&zero_val);
__m128 _sum4 = _mm_broadcast_ss(&zero_val);
__m128 _sum5 = _mm_broadcast_ss(&zero_val);
__m128 _sum6 = _mm_broadcast_ss(&zero_val);
__m128 _sum7 = _mm_broadcast_ss(&zero_val);
#else
__m128 _sum0 = _mm_set1_ps(0.f);
__m128 _sum1 = _mm_set1_ps(0.f);
__m128 _sum2 = _mm_set1_ps(0.f);
__m128 _sum3 = _mm_set1_ps(0.f);
__m128 _sum4 = _mm_set1_ps(0.f);
__m128 _sum5 = _mm_set1_ps(0.f);
__m128 _sum6 = _mm_set1_ps(0.f);
__m128 _sum7 = _mm_set1_ps(0.f);
#endif
int q=0;
for (; q+3<inch; q=q+4)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _r1 = _mm_loadu_ps(r0+4);
__m128 _r2 = _mm_loadu_ps(r0+8);
__m128 _r3 = _mm_loadu_ps(r0+12);
__m128 _k0 = _mm_loadu_ps(kptr);
__m128 _k1 = _mm_loadu_ps(kptr+4);
__m128 _k2 = _mm_loadu_ps(kptr+8);
__m128 _k3 = _mm_loadu_ps(kptr+12);
__m128 _k4 = _mm_loadu_ps(kptr+16);
__m128 _k5 = _mm_loadu_ps(kptr+20);
__m128 _k6 = _mm_loadu_ps(kptr+24);
__m128 _k7 = _mm_loadu_ps(kptr+28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
kptr += 32;
_k0 = _mm_loadu_ps(kptr);
_k1 = _mm_loadu_ps(kptr+4);
_k2 = _mm_loadu_ps(kptr+8);
_k3 = _mm_loadu_ps(kptr+12);
_k4 = _mm_loadu_ps(kptr+16);
_k5 = _mm_loadu_ps(kptr+20);
_k6 = _mm_loadu_ps(kptr+24);
_k7 = _mm_loadu_ps(kptr+28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r1, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r1, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r1, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r1, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r1, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r1, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r1, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r1, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r1, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r1, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r1, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r1, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r1, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r1, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r1, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r1, _k7));
#endif
kptr += 32;
_k0 = _mm_loadu_ps(kptr);
_k1 = _mm_loadu_ps(kptr+4);
_k2 = _mm_loadu_ps(kptr+8);
_k3 = _mm_loadu_ps(kptr+12);
_k4 = _mm_loadu_ps(kptr+16);
_k5 = _mm_loadu_ps(kptr+20);
_k6 = _mm_loadu_ps(kptr+24);
_k7 = _mm_loadu_ps(kptr+28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r2, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r2, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r2, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r2, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r2, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r2, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r2, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r2, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r2, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r2, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r2, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r2, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r2, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r2, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r2, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r2, _k7));
#endif
kptr += 32;
_k0 = _mm_loadu_ps(kptr);
_k1 = _mm_loadu_ps(kptr+4);
_k2 = _mm_loadu_ps(kptr+8);
_k3 = _mm_loadu_ps(kptr+12);
_k4 = _mm_loadu_ps(kptr+16);
_k5 = _mm_loadu_ps(kptr+20);
_k6 = _mm_loadu_ps(kptr+24);
_k7 = _mm_loadu_ps(kptr+28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r3, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r3, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r3, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r3, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r3, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r3, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r3, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r3, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r3, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r3, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r3, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r3, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r3, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r3, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r3, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r3, _k7));
#endif
kptr += 32;
r0 += 16;
}
for (; q<inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _k0 = _mm_loadu_ps(kptr);
__m128 _k1 = _mm_loadu_ps(kptr+4);
__m128 _k2 = _mm_loadu_ps(kptr+8);
__m128 _k3 = _mm_loadu_ps(kptr+12);
__m128 _k4 = _mm_loadu_ps(kptr+16);
__m128 _k5 = _mm_loadu_ps(kptr+20);
__m128 _k6 = _mm_loadu_ps(kptr+24);
__m128 _k7 = _mm_loadu_ps(kptr+28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
kptr += 32;
r0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
_mm_storeu_ps(output1_tm, _sum1);
_mm_storeu_ps(output2_tm, _sum2);
_mm_storeu_ps(output3_tm, _sum3);
_mm_storeu_ps(output4_tm, _sum4);
_mm_storeu_ps(output5_tm, _sum5);
_mm_storeu_ps(output6_tm, _sum6);
_mm_storeu_ps(output7_tm, _sum7);
#else
float sum0[4] = {0};
float sum1[4] = {0};
float sum2[4] = {0};
float sum3[4] = {0};
float sum4[4] = {0};
float sum5[4] = {0};
float sum6[4] = {0};
float sum7[4] = {0};
for (int q=0; q<inch; q++)
{
for (int n=0; n<4; n++)
{
sum0[n] += r0[n] * kptr[n];
sum1[n] += r0[n] * kptr[n+4];
sum2[n] += r0[n] * kptr[n+8];
sum3[n] += r0[n] * kptr[n+12];
sum4[n] += r0[n] * kptr[n+16];
sum5[n] += r0[n] * kptr[n+20];
sum6[n] += r0[n] * kptr[n+24];
sum7[n] += r0[n] * kptr[n+28];
}
kptr += 32;
r0 += 4;
}
for (int n=0; n<4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
output4_tm[n] = sum4[n];
output5_tm[n] = sum5[n];
output6_tm[n] = sum6[n];
output7_tm[n] = sum7[n];
}
#endif // __AVX__
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
output4_tm += 36;
output5_tm += 36;
output6_tm += 36;
output7_tm += 36;
}
}
nn_outch = (outch - remain_outch_start) >> 2;
for (int pp=0; pp<nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p+1);
float* output2_tm = top_blob_tm.channel(p+2);
float* output3_tm = top_blob_tm.channel(p+3);
output0_tm = output0_tm + r*4;
output1_tm = output1_tm + r*4;
output2_tm = output2_tm + r*4;
output3_tm = output3_tm + r*4;
for (int i=0; i<tiles; i++)
{
const float* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4);
const float* r0 = bottom_blob_tm.channel(tiles*r+i);
#if __AVX__ || __SSE__
#if __AVX__
float zero_val = 0.f;
__m128 _sum0 = _mm_broadcast_ss(&zero_val);
__m128 _sum1 = _mm_broadcast_ss(&zero_val);
__m128 _sum2 = _mm_broadcast_ss(&zero_val);
__m128 _sum3 = _mm_broadcast_ss(&zero_val);
#else
__m128 _sum0 = _mm_set1_ps(0.f);
__m128 _sum1 = _mm_set1_ps(0.f);
__m128 _sum2 = _mm_set1_ps(0.f);
__m128 _sum3 = _mm_set1_ps(0.f);
#endif
for (int q=0; q<inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _k0 = _mm_loadu_ps(kptr);
__m128 _k1 = _mm_loadu_ps(kptr+4);
__m128 _k2 = _mm_loadu_ps(kptr+8);
__m128 _k3 = _mm_loadu_ps(kptr+12);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
#endif
kptr += 16;
r0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
_mm_storeu_ps(output1_tm, _sum1);
_mm_storeu_ps(output2_tm, _sum2);
_mm_storeu_ps(output3_tm, _sum3);
#else
float sum0[4] = {0};
float sum1[4] = {0};
float sum2[4] = {0};
float sum3[4] = {0};
for (int q=0; q<inch; q++)
{
for (int n=0; n<4; n++)
{
sum0[n] += r0[n] * kptr[n];
sum1[n] += r0[n] * kptr[n+4];
sum2[n] += r0[n] * kptr[n+8];
sum3[n] += r0[n] * kptr[n+12];
}
kptr += 16;
r0 += 4;
}
for (int n=0; n<4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
#endif // __AVX__
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
}
}
remain_outch_start += nn_outch << 2;
for (int p=remain_outch_start; p<outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
output0_tm = output0_tm + r*4;
for (int i=0; i<tiles; i++)
{
const float* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4 + p%4);
const float* r0 = bottom_blob_tm.channel(tiles*r+i);
#if __AVX__ || __SSE__
#if __AVX__
float zero_val = 0.f;
__m128 _sum0 = _mm_broadcast_ss(&zero_val);
#else
__m128 _sum0 = _mm_set1_ps(0.f);
#endif
for (int q=0; q<inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _k0 = _mm_loadu_ps(kptr);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
#endif
kptr += 16;
r0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
#else
float sum0[4] = {0};
for (int q=0; q<inch; q++)
{
for (int n=0; n<4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
}
kptr += 4;
r0 += 4;
}
for (int n=0; n<4; n++)
{
output0_tm[n] = sum0[n];
}
#endif // __AVX__ || __SSE__
output0_tm += 36;
}
}
// for (int p=0; p<outch; p++)
// {
// Mat out0_tm = top_blob_tm.channel(p);
// const Mat kernel0_tm = kernel_tm.channel(p);
// for (int i=0; i<tiles; i++)
// {
// float* output0_tm = out0_tm.row<int>(i);
// int sum0[36] = {0};
// for (int q=0; q<inch; q++)
// {
// const float* r0 = bottom_blob_tm.channel(q).row<float>(i);
// const float* k0 = kernel0_tm.row<float>(q);
// for (int n=0; n<36; n++)
// {
// sum0[n] += (int)r0[n] * k0[n];
// }
// }
// for (int n=0; n<36; n++)
// {
// output0_tm[n] = sum0[n];
// }
// }
// }
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, elemsize, opt.workspace_allocator);
{
// AT
// const float itm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + r01 + r02 + r03 + r04
// 1 = r01 - r02 + 2 * (r03 - r04)
// 2 = r01 + r02 + 4 * (r03 + r04)
// 3 = r01 - r02 + 8 * (r03 - r04) + r05
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm/6; // may be the block num in Feathercnn
int nRowBlocks = w_tm/6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
float* out_tile = top_blob_tm.channel(p);
float* outRow0 = top_blob_bordered.channel(p);
float* outRow1 = outRow0 + outw;
float* outRow2 = outRow0 + outw * 2;
float* outRow3 = outRow0 + outw * 3;
const float bias0 = bias ? bias[p] : 0.f;
for (int j=0; j<nColBlocks; j++)
{
for(int i=0; i<nRowBlocks; i++)
{
// TODO AVX2
float s0[6],s1[6],s2[6],s3[6],s4[6],s5[6];
float w0[6],w1[6],w2[6],w3[6];
float d0[4],d1[4],d2[4],d3[4],d4[4],d5[4];
float o0[4],o1[4],o2[4],o3[4];
// load
for (int n = 0; n < 6; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n+ 6];
s2[n] = out_tile[n+12];
s3[n] = out_tile[n+18];
s4[n] = out_tile[n+24];
s5[n] = out_tile[n+30];
}
// w = A_T * W
for (int n = 0; n < 6; n++)
{
w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
w1[n] = s1[n] - s2[n] + 2*s3[n] - 2*s4[n];
w2[n] = s1[n] + s2[n] + 4*s3[n] + 4*s4[n];
w3[n] = s1[n] - s2[n] + 8*s3[n] - 8*s4[n] + s5[n];
}
// transpose w to w_t
{
d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0];
d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1];
d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2];
d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3];
d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4];
d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5];
}
// Y = A_T * w_t
for (int n = 0; n < 4; n++)
{
o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
o1[n] = d1[n] - d2[n] + 2*d3[n] - 2*d4[n];
o2[n] = d1[n] + d2[n] + 4*d3[n] + 4*d4[n];
o3[n] = d1[n] - d2[n] + 8*d3[n] - 8*d4[n] + d5[n];
}
// save to top blob tm
for (int n = 0; n < 4; n++)
{
outRow0[n] = o0[n] + bias0;
outRow1[n] = o1[n] + bias0;
outRow2[n] = o2[n] + bias0;
outRow3[n] = o3[n] + bias0;
}
out_tile += 36;
outRow0 += 4;
outRow1 += 4;
outRow2 += 4;
outRow3 += 4;
}
outRow0 += outw * 3;
outRow1 += outw * 3;
outRow2 += outw * 3;
outRow3 += outw * 3;
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads);
}
// Naive 3x3 stride-2 convolution (no padding handled here; caller supplies a
// pre-bordered bottom_blob). For each output channel p the output is seeded
// with the bias, then each input channel's 3x3 window contribution is
// accumulated row by row. Channels are processed in parallel with OpenMP.
// _kernel layout: [outch][inch][9] contiguous floats; _bias: [outch] or empty.
static void conv3x3s2_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// After consuming a row, r0/r1/r2 sit 2*outw columns in; tailstep advances
// them past the unread remainder of this row plus one full row (stride 2).
const int tailstep = w - 2 * outw + w;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
// Missing bias blob -> behave as zero bias.
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q = 0; q < inch; q++)
{
float *outptr = out;
const float *img = bottom_blob.channel(q);
// 3x3 kernel for (output p, input q).
const float* kernel0 = kernel + p*inch*9 + q*9;
// Three consecutive input rows of the sliding window.
const float *r0 = img;
const float *r1 = img + w;
const float *r2 = img + w * 2;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
for (int i = 0; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
// Full 3x3 dot product for one output pixel.
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
// Accumulate across input channels (out was pre-filled with bias).
*outptr += sum;
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
} |
tetrahedron_method.c | /* Copyright (C) 2014 Atsushi Togo */
/* All rights reserved. */
/* This file was originally part of spglib and is part of kspclib. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
/* tetrahedron_method.c */
/* Copyright (C) 2014 Atsushi Togo */
#include <stddef.h>
#include <kgrid.h>
#include "tetrahedron_method.h"
#ifdef THMWARNING
#include <stdio.h>
#define warning_print(...) fprintf(stderr,__VA_ARGS__)
#else
#define warning_print(...)
#endif
/* 6-------7 */
/* /| /| */
/* / | / | */
/* 4-------5 | */
/* | 2----|--3 */
/* | / | / */
/* |/ |/ */
/* 0-------1 */
/* */
/* i: vec neighbours */
/* 0: O 1, 2, 4 */
/* 1: a 0, 3, 5 */
/* 2: b 0, 3, 6 */
/* 3: a + b 1, 2, 7 */
/* 4: c 0, 5, 6 */
/* 5: c + a 1, 4, 7 */
/* 6: c + b 2, 4, 7 */
/* 7: c + a + b 3, 5, 6 */
/* The four main diagonals of the parallelepiped spanned by a, b, c
   (see the vertex diagram above); each row is the diagonal direction in
   integer lattice coordinates. The shortest one selects the tetrahedra
   subdivision used by db_relative_grid_address. */
static int main_diagonals[4][3] = {{ 1, 1, 1}, /* 0-7 */
{-1, 1, 1}, /* 1-6 */
{ 1,-1, 1}, /* 2-5 */
{ 1, 1,-1}}; /* 3-4 */
/* For each choice of main diagonal (first index, matching main_diagonals),
   the 24 tetrahedra sharing a grid point: each tetrahedron is 4 vertices
   given as integer grid offsets relative to that point. */
static int db_relative_grid_address[4][24][4][3] = {
/* main diagonal 0-7: { 1, 1, 1} */
{
{ { 0, 0, 0}, { 1, 0, 0}, { 1, 1, 0}, { 1, 1, 1}, },
{ { 0, 0, 0}, { 1, 0, 0}, { 1, 0, 1}, { 1, 1, 1}, },
{ { 0, 0, 0}, { 0, 1, 0}, { 1, 1, 0}, { 1, 1, 1}, },
{ { 0, 0, 0}, { 0, 1, 0}, { 0, 1, 1}, { 1, 1, 1}, },
{ { 0, 0, 0}, { 0, 0, 1}, { 1, 0, 1}, { 1, 1, 1}, },
{ { 0, 0, 0}, { 0, 0, 1}, { 0, 1, 1}, { 1, 1, 1}, },
{ { 0, 0, 0}, { 0, 1, 0}, { 0, 1, 1}, {-1, 0, 0}, },
{ { 0, 0, 0}, { 0, 0, 1}, { 0, 1, 1}, {-1, 0, 0}, },
{ { 0, 0, 0}, { 1, 0, 0}, { 1, 0, 1}, { 0, -1, 0}, },
{ { 0, 0, 0}, { 0, 0, 1}, { 1, 0, 1}, { 0, -1, 0}, },
{ { 0, 0, 0}, { 0, 0, 1}, {-1, -1, 0}, { 0, -1, 0}, },
{ { 0, 0, 0}, { 0, 0, 1}, {-1, -1, 0}, {-1, 0, 0}, },
{ { 0, 0, 0}, { 1, 0, 0}, { 1, 1, 0}, { 0, 0, -1}, },
{ { 0, 0, 0}, { 0, 1, 0}, { 1, 1, 0}, { 0, 0, -1}, },
{ { 0, 0, 0}, { 0, 1, 0}, {-1, 0, -1}, { 0, 0, -1}, },
{ { 0, 0, 0}, { 0, 1, 0}, {-1, 0, -1}, {-1, 0, 0}, },
{ { 0, 0, 0}, { 1, 0, 0}, { 0, -1, -1}, { 0, 0, -1}, },
{ { 0, 0, 0}, { 1, 0, 0}, { 0, -1, -1}, { 0, -1, 0}, },
{ { 0, 0, 0}, {-1, -1, -1}, { 0, -1, -1}, { 0, 0, -1}, },
{ { 0, 0, 0}, {-1, -1, -1}, { 0, -1, -1}, { 0, -1, 0}, },
{ { 0, 0, 0}, {-1, -1, -1}, {-1, 0, -1}, { 0, 0, -1}, },
{ { 0, 0, 0}, {-1, -1, -1}, {-1, 0, -1}, {-1, 0, 0}, },
{ { 0, 0, 0}, {-1, -1, -1}, {-1, -1, 0}, { 0, -1, 0}, },
{ { 0, 0, 0}, {-1, -1, -1}, {-1, -1, 0}, {-1, 0, 0}, },
},
/* main diagonal 1-6: {-1, 1, 1} */
{
{ { 0, 0, 0}, { 1, 0, 0}, { 0, 1, 0}, { 0, 1, 1}, },
{ { 0, 0, 0}, { 1, 0, 0}, { 0, 0, 1}, { 0, 1, 1}, },
{ { 0, 0, 0}, {-1, 1, 0}, {-1, 1, 1}, {-1, 0, 0}, },
{ { 0, 0, 0}, {-1, 0, 1}, {-1, 1, 1}, {-1, 0, 0}, },
{ { 0, 0, 0}, {-1, 1, 0}, { 0, 1, 0}, {-1, 1, 1}, },
{ { 0, 0, 0}, { 0, 1, 0}, {-1, 1, 1}, { 0, 1, 1}, },
{ { 0, 0, 0}, {-1, 0, 1}, { 0, 0, 1}, {-1, 1, 1}, },
{ { 0, 0, 0}, { 0, 0, 1}, {-1, 1, 1}, { 0, 1, 1}, },
{ { 0, 0, 0}, { 0, 0, 1}, { 0, -1, 0}, { 1, -1, 0}, },
{ { 0, 0, 0}, { 1, 0, 0}, { 0, 0, 1}, { 1, -1, 0}, },
{ { 0, 0, 0}, {-1, 0, 1}, { 0, -1, 0}, {-1, 0, 0}, },
{ { 0, 0, 0}, {-1, 0, 1}, { 0, 0, 1}, { 0, -1, 0}, },
{ { 0, 0, 0}, { 0, 1, 0}, { 0, 0, -1}, { 1, 0, -1}, },
{ { 0, 0, 0}, { 1, 0, 0}, { 0, 1, 0}, { 1, 0, -1}, },
{ { 0, 0, 0}, {-1, 1, 0}, { 0, 0, -1}, {-1, 0, 0}, },
{ { 0, 0, 0}, {-1, 1, 0}, { 0, 1, 0}, { 0, 0, -1}, },
{ { 0, 0, 0}, { 0, -1, -1}, { 1, -1, -1}, { 0, 0, -1}, },
{ { 0, 0, 0}, { 0, -1, -1}, { 1, -1, -1}, { 0, -1, 0}, },
{ { 0, 0, 0}, { 1, -1, -1}, { 0, 0, -1}, { 1, 0, -1}, },
{ { 0, 0, 0}, { 1, 0, 0}, { 1, -1, -1}, { 1, 0, -1}, },
{ { 0, 0, 0}, { 1, -1, -1}, { 0, -1, 0}, { 1, -1, 0}, },
{ { 0, 0, 0}, { 1, 0, 0}, { 1, -1, -1}, { 1, -1, 0}, },
{ { 0, 0, 0}, { 0, -1, -1}, { 0, 0, -1}, {-1, 0, 0}, },
{ { 0, 0, 0}, { 0, -1, -1}, { 0, -1, 0}, {-1, 0, 0}, },
},
/* main diagonal 2-5: { 1,-1, 1} */
{
{ { 0, 0, 0}, { 1, 0, 0}, { 0, 1, 0}, { 1, 0, 1}, },
{ { 0, 0, 0}, { 0, 1, 0}, { 0, 0, 1}, { 1, 0, 1}, },
{ { 0, 0, 0}, {-1, 1, 0}, { 0, 0, 1}, {-1, 0, 0}, },
{ { 0, 0, 0}, {-1, 1, 0}, { 0, 1, 0}, { 0, 0, 1}, },
{ { 0, 0, 0}, { 1, -1, 1}, { 0, -1, 0}, { 1, -1, 0}, },
{ { 0, 0, 0}, { 0, -1, 1}, { 1, -1, 1}, { 0, -1, 0}, },
{ { 0, 0, 0}, { 1, 0, 0}, { 1, -1, 1}, { 1, -1, 0}, },
{ { 0, 0, 0}, { 1, 0, 0}, { 1, -1, 1}, { 1, 0, 1}, },
{ { 0, 0, 0}, { 0, -1, 1}, { 1, -1, 1}, { 0, 0, 1}, },
{ { 0, 0, 0}, { 1, -1, 1}, { 0, 0, 1}, { 1, 0, 1}, },
{ { 0, 0, 0}, { 0, -1, 1}, { 0, -1, 0}, {-1, 0, 0}, },
{ { 0, 0, 0}, { 0, -1, 1}, { 0, 0, 1}, {-1, 0, 0}, },
{ { 0, 0, 0}, { 1, 0, 0}, { 0, 0, -1}, { 0, 1, -1}, },
{ { 0, 0, 0}, { 1, 0, 0}, { 0, 1, 0}, { 0, 1, -1}, },
{ { 0, 0, 0}, {-1, 0, -1}, { 0, 0, -1}, {-1, 1, -1}, },
{ { 0, 0, 0}, {-1, 0, -1}, {-1, 1, -1}, {-1, 0, 0}, },
{ { 0, 0, 0}, { 0, 0, -1}, {-1, 1, -1}, { 0, 1, -1}, },
{ { 0, 0, 0}, { 0, 1, 0}, {-1, 1, -1}, { 0, 1, -1}, },
{ { 0, 0, 0}, {-1, 1, 0}, {-1, 1, -1}, {-1, 0, 0}, },
{ { 0, 0, 0}, {-1, 1, 0}, { 0, 1, 0}, {-1, 1, -1}, },
{ { 0, 0, 0}, { 0, 0, -1}, { 0, -1, 0}, { 1, -1, 0}, },
{ { 0, 0, 0}, { 1, 0, 0}, { 0, 0, -1}, { 1, -1, 0}, },
{ { 0, 0, 0}, {-1, 0, -1}, { 0, 0, -1}, { 0, -1, 0}, },
{ { 0, 0, 0}, {-1, 0, -1}, { 0, -1, 0}, {-1, 0, 0}, },
},
/* main diagonal 3-4: { 1, 1,-1} */
{
{ { 0, 0, 0}, { 1, 0, 0}, { 1, 1, 0}, { 0, 0, 1}, },
{ { 0, 0, 0}, { 0, 1, 0}, { 1, 1, 0}, { 0, 0, 1}, },
{ { 0, 0, 0}, { 0, 1, 0}, {-1, 0, 1}, {-1, 0, 0}, },
{ { 0, 0, 0}, { 0, 1, 0}, {-1, 0, 1}, { 0, 0, 1}, },
{ { 0, 0, 0}, { 1, 0, 0}, { 0, -1, 1}, { 0, -1, 0}, },
{ { 0, 0, 0}, { 1, 0, 0}, { 0, -1, 1}, { 0, 0, 1}, },
{ { 0, 0, 0}, {-1, -1, 1}, {-1, -1, 0}, { 0, -1, 0}, },
{ { 0, 0, 0}, {-1, -1, 1}, {-1, -1, 0}, {-1, 0, 0}, },
{ { 0, 0, 0}, {-1, -1, 1}, { 0, -1, 1}, { 0, -1, 0}, },
{ { 0, 0, 0}, {-1, -1, 1}, {-1, 0, 1}, {-1, 0, 0}, },
{ { 0, 0, 0}, {-1, -1, 1}, { 0, -1, 1}, { 0, 0, 1}, },
{ { 0, 0, 0}, {-1, -1, 1}, {-1, 0, 1}, { 0, 0, 1}, },
{ { 0, 0, 0}, { 0, 0, -1}, { 1, 0, -1}, { 1, 1, -1}, },
{ { 0, 0, 0}, { 0, 0, -1}, { 0, 1, -1}, { 1, 1, -1}, },
{ { 0, 0, 0}, { 1, 0, 0}, { 1, 0, -1}, { 1, 1, -1}, },
{ { 0, 0, 0}, { 0, 1, 0}, { 0, 1, -1}, { 1, 1, -1}, },
{ { 0, 0, 0}, { 1, 0, 0}, { 1, 1, 0}, { 1, 1, -1}, },
{ { 0, 0, 0}, { 0, 1, 0}, { 1, 1, 0}, { 1, 1, -1}, },
{ { 0, 0, 0}, { 0, 0, -1}, { 0, 1, -1}, {-1, 0, 0}, },
{ { 0, 0, 0}, { 0, 1, 0}, { 0, 1, -1}, {-1, 0, 0}, },
{ { 0, 0, 0}, { 0, 0, -1}, { 1, 0, -1}, { 0, -1, 0}, },
{ { 0, 0, 0}, { 1, 0, 0}, { 1, 0, -1}, { 0, -1, 0}, },
{ { 0, 0, 0}, { 0, 0, -1}, {-1, -1, 0}, { 0, -1, 0}, },
{ { 0, 0, 0}, { 0, 0, -1}, {-1, -1, 0}, {-1, 0, 0}, },
},
};
static void
get_integration_weight_at_omegas(double *integration_weights,
const int num_omegas,
const double *omegas,
THMCONST double tetrahedra_omegas[24][4],
double (*gn)(const int,
const double,
const double[4]),
double (*IJ)(const int,
const int,
const double,
const double[4]));
static double
get_integration_weight(const double omega,
THMCONST double tetrahedra_omegas[24][4],
double (*gn)(const int,
const double,
const double[4]),
double (*IJ)(const int,
const int,
const double,
const double[4]));
static int get_main_diagonal(THMCONST double rec_lattice[3][3]);
static int sort_omegas(double v[4]);
static double norm_squared_d3(const double a[3]);
static void multiply_matrix_vector_di3(double v[3],
THMCONST double a[3][3],
const int b[3]);
static double _f(const int n,
const int m,
const double omega,
const double vertices_omegas[4]);
static double _J(const int i,
const int ci,
const double omega,
const double vertices_omegas[4]);
static double _I(const int i,
const int ci,
const double omega,
const double vertices_omegas[4]);
static double _n(const int i,
const double omega,
const double vertices_omegas[4]);
static double _g(const int i,
const double omega,
const double vertices_omegas[4]);
static double _n_0(void);
static double _n_1(const double omega,
const double vertices_omegas[4]);
static double _n_2(const double omega,
const double vertices_omegas[4]);
static double _n_3(const double omega,
const double vertices_omegas[4]);
static double _n_4(void);
static double _g_0(void);
static double _g_1(const double omega,
const double vertices_omegas[4]);
static double _g_2(const double omega,
const double vertices_omegas[4]);
static double _g_3(const double omega,
const double vertices_omegas[4]);
static double _g_4(void);
static double _J_0(void);
static double _J_10(const double omega,
const double vertices_omegas[4]);
static double _J_11(const double omega,
const double vertices_omegas[4]);
static double _J_12(const double omega,
const double vertices_omegas[4]);
static double _J_13(const double omega,
const double vertices_omegas[4]);
static double _J_20(const double omega,
const double vertices_omegas[4]);
static double _J_21(const double omega,
const double vertices_omegas[4]);
static double _J_22(const double omega,
const double vertices_omegas[4]);
static double _J_23(const double omega,
const double vertices_omegas[4]);
static double _J_30(const double omega,
const double vertices_omegas[4]);
static double _J_31(const double omega,
const double vertices_omegas[4]);
static double _J_32(const double omega,
const double vertices_omegas[4]);
static double _J_33(const double omega,
const double vertices_omegas[4]);
static double _J_4(void);
static double _I_0(void);
static double _I_10(const double omega,
const double vertices_omegas[4]);
static double _I_11(const double omega,
const double vertices_omegas[4]);
static double _I_12(const double omega,
const double vertices_omegas[4]);
static double _I_13(const double omega,
const double vertices_omegas[4]);
static double _I_20(const double omega,
const double vertices_omegas[4]);
static double _I_21(const double omega,
const double vertices_omegas[4]);
static double _I_22(const double omega,
const double vertices_omegas[4]);
static double _I_23(const double omega,
const double vertices_omegas[4]);
static double _I_30(const double omega,
const double vertices_omegas[4]);
static double _I_31(const double omega,
const double vertices_omegas[4]);
static double _I_32(const double omega,
const double vertices_omegas[4]);
static double _I_33(const double omega,
const double vertices_omegas[4]);
static double _I_4(void);
/* Fill relative_grid_address with the 24-tetrahedra subdivision chosen
   for this reciprocal lattice (the one whose main diagonal is shortest). */
void thm_get_relative_grid_address(int relative_grid_address[24][4][3],
                                   THMCONST double rec_lattice[3][3])
{
  int t, v, c;
  const int diag = get_main_diagonal(rec_lattice);

  for (t = 0; t < 24; t++) {
    for (v = 0; v < 4; v++) {
      for (c = 0; c < 3; c++) {
        relative_grid_address[t][v][c] =
          db_relative_grid_address[diag][t][v][c];
      }
    }
  }
}
/* Copy the tetrahedra tables for all four main-diagonal choices. */
void thm_get_all_relative_grid_address(int relative_grid_address[4][24][4][3])
{
  int d, t, v, c;

  for (d = 0; d < 4; d++) {
    for (t = 0; t < 24; t++) {
      for (v = 0; v < 4; v++) {
        for (c = 0; c < 3; c++) {
          relative_grid_address[d][t][v][c] =
            db_relative_grid_address[d][t][v][c];
        }
      }
    }
  }
}
/* Integration weight at omega for one grid point.
   function == 'I' selects the delta-function (g/I) terms; any other value
   selects the Heaviside (n/J) terms. */
double thm_get_integration_weight(const double omega,
                                  THMCONST double tetrahedra_omegas[24][4],
                                  const char function)
{
  const int use_delta = (function == 'I');

  return get_integration_weight(omega,
                                tetrahedra_omegas,
                                use_delta ? _g : _n,
                                use_delta ? _I : _J);
}
/* Vectorized variant of thm_get_integration_weight: evaluate the weight at
   num_omegas frequencies into integration_weights. Term selection as in
   thm_get_integration_weight ('I' -> g/I, otherwise n/J). */
void
thm_get_integration_weight_at_omegas(double *integration_weights,
                                     const int num_omegas,
                                     const double *omegas,
                                     THMCONST double tetrahedra_omegas[24][4],
                                     const char function)
{
  const int use_delta = (function == 'I');

  get_integration_weight_at_omegas(integration_weights,
                                   num_omegas,
                                   omegas,
                                   tetrahedra_omegas,
                                   use_delta ? _g : _n,
                                   use_delta ? _I : _J);
}
/* Resolve the grid-point indices of num_relative_grid_address neighbours of
   grid_point. Addresses are doubled so they can be looked up on the doubled
   (BZ) mesh first; -1 in bz_map means "outside the BZ table", in which case
   the point is folded back onto the ordinary mesh. */
void thm_get_neighboring_grid_points(int neighboring_grid_points[],
                                     const int grid_point,
                                     THMCONST int relative_grid_address[][3],
                                     const int num_relative_grid_address,
                                     const int mesh[3],
                                     THMCONST int bz_grid_address[][3],
                                     const int bz_map[])
{
  int mesh2[3], adrs2[3], bz_adrs2[3];
  int i, j, gp;

  for (j = 0; j < 3; j++) {
    mesh2[j] = mesh[j] * 2;
  }
  for (i = 0; i < num_relative_grid_address; i++) {
    for (j = 0; j < 3; j++) {
      adrs2[j] = (bz_grid_address[grid_point][j] +
                  relative_grid_address[i][j]) * 2;
      bz_adrs2[j] = adrs2[j];
    }
    gp = bz_map[kgd_get_grid_point_double_mesh(bz_adrs2, mesh2)];
    if (gp == -1) {
      neighboring_grid_points[i] =
        kgd_get_grid_point_double_mesh(adrs2, mesh);
    } else {
      neighboring_grid_points[i] = gp;
    }
  }
}
/* size_t variant of thm_get_neighboring_grid_points for dense meshes.
   Here the "not in BZ table" sentinel is prod(2*mesh) rather than -1. */
void
thm_get_dense_neighboring_grid_points(size_t neighboring_grid_points[],
                                      const size_t grid_point,
                                      THMCONST int relative_grid_address[][3],
                                      const int num_relative_grid_address,
                                      const int mesh[3],
                                      THMCONST int bz_grid_address[][3],
                                      const size_t bz_map[])
{
  int mesh2[3], adrs2[3], bz_adrs2[3];
  int i;
  size_t j, gp, num_bz_gps;

  num_bz_gps = 1;
  for (i = 0; i < 3; i++) {
    mesh2[i] = mesh[i] * 2;
    num_bz_gps *= mesh2[i];
  }
  for (i = 0; i < num_relative_grid_address; i++) {
    for (j = 0; j < 3; j++) {
      adrs2[j] = (bz_grid_address[grid_point][j] +
                  relative_grid_address[i][j]) * 2;
      bz_adrs2[j] = adrs2[j];
    }
    gp = bz_map[kgd_get_dense_grid_point_double_mesh(bz_adrs2, mesh2)];
    if (gp == num_bz_gps) {
      neighboring_grid_points[i] =
        kgd_get_dense_grid_point_double_mesh(adrs2, mesh);
    } else {
      neighboring_grid_points[i] = gp;
    }
  }
}
/* Evaluate get_integration_weight at each of the num_omegas frequencies,
   writing results to integration_weights. gn/IJ select the g/I (delta) or
   n/J (Heaviside) term pair. Each omega is independent, so the loop is
   trivially parallel. */
static void
get_integration_weight_at_omegas(double *integration_weights,
const int num_omegas,
const double *omegas,
THMCONST double tetrahedra_omegas[24][4],
double (*gn)(const int,
const double,
const double[4]),
double (*IJ)(const int,
const int,
const double,
const double[4]))
{
int i;
#pragma omp parallel for
for (i = 0; i < num_omegas; i++) {
integration_weights[i] = get_integration_weight(omegas[i],
tetrahedra_omegas,
gn, IJ);
}
}
/* Sum the tetrahedron-method weight over the 24 tetrahedra around one grid
   point. For each tetrahedron the 4 vertex energies are sorted; the region
   (0..4) in which omega lies relative to the sorted energies picks the
   analytic IJ*gn term. ci is the post-sort position of the vertex of
   interest (see sort_omegas). The final factor 1/6 is the tetrahedron
   volume normalization. */
static double
get_integration_weight(const double omega,
                       THMCONST double tetrahedra_omegas[24][4],
                       double (*gn)(const int,
                                    const double,
                                    const double[4]),
                       double (*IJ)(const int,
                                    const int,
                                    const double,
                                    const double[4]))
{
  int i, j, ci;
  double total;
  double v[4];

  total = 0;
  for (i = 0; i < 24; i++) {
    for (j = 0; j < 4; j++) {
      v[j] = tetrahedra_omegas[i][j];
    }
    ci = sort_omegas(v);
    if (omega < v[0]) {
      total += IJ(0, ci, omega, v) * gn(0, omega, v);
    } else if (v[0] < omega && omega < v[1]) {
      total += IJ(1, ci, omega, v) * gn(1, omega, v);
    } else if (v[1] < omega && omega < v[2]) {
      total += IJ(2, ci, omega, v) * gn(2, omega, v);
    } else if (v[2] < omega && omega < v[3]) {
      total += IJ(3, ci, omega, v) * gn(3, omega, v);
    } else if (v[3] < omega) {
      total += IJ(4, ci, omega, v) * gn(4, omega, v);
    }
    /* omega exactly equal to a vertex energy matches no branch and
       contributes nothing, as in the original nested form. */
  }
  return total / 6;
}
/* Sort the four vertex energies in v ascending (in place) with a fixed
   merge network, and return the final position (0..3) of the element that
   was originally at v[0] — the vertex the integration weight refers to.
   The intermediate codes 4 and 5 track that element through the merge
   before being resolved to a final position in the last step. */
static int sort_omegas(double v[4])
{
  double m[4];
  double t;
  int pos;

  pos = 0;
  /* Stage 1: sort each pair (v0,v1) and (v2,v3). */
  if (v[0] > v[1]) {
    m[0] = v[1];
    m[1] = v[0];
    pos = 1;
  } else {
    m[0] = v[0];
    m[1] = v[1];
  }
  if (v[2] > v[3]) {
    m[2] = v[3];
    m[3] = v[2];
  } else {
    m[2] = v[2];
    m[3] = v[3];
  }
  /* Stage 2: merge the two pair-minima and the two pair-maxima. */
  if (m[0] > m[2]) {
    v[0] = m[2];
    v[1] = m[0];
    if (pos == 0) {
      pos = 4;
    }
  } else {
    v[0] = m[0];
    v[1] = m[2];
  }
  if (m[1] > m[3]) {
    v[3] = m[1];
    v[2] = m[3];
    if (pos == 1) {
      pos = 3;
    }
  } else {
    v[3] = m[3];
    v[2] = m[1];
    if (pos == 1) {
      pos = 5;
    }
  }
  /* Stage 3: order the middle pair and resolve the tracking codes. */
  if (v[1] > v[2]) {
    t = v[1];
    v[1] = v[2];
    v[2] = t;
    if (pos == 4) {
      pos = 2;
    }
    if (pos == 5) {
      pos = 1;
    }
  } else {
    if (pos == 4) {
      pos = 1;
    }
    if (pos == 5) {
      pos = 2;
    }
  }
  return pos;
}
/* Return the index (0..3) of the shortest main diagonal of the reciprocal
   cell; ties keep the lowest index (strict '<' comparison). */
static int get_main_diagonal(THMCONST double rec_lattice[3][3])
{
  int i, shortest;
  double len2, best;
  double diag[3];

  shortest = 0;
  multiply_matrix_vector_di3(diag, rec_lattice, main_diagonals[0]);
  best = norm_squared_d3(diag);
  for (i = 1; i < 4; i++) {
    multiply_matrix_vector_di3(diag, rec_lattice, main_diagonals[i]);
    len2 = norm_squared_d3(diag);
    if (len2 < best) {
      best = len2;
      shortest = i;
    }
  }
  return shortest;
}
/* Squared Euclidean norm of a 3-vector. */
static double norm_squared_d3(const double a[3])
{
  int i;
  double s;

  s = 0;
  for (i = 0; i < 3; i++) {
    s += a[i] * a[i];
  }
  return s;
}
/* v = a * b for a 3x3 double matrix and an integer 3-vector.
   The product is built in a temporary first so v may alias a. */
static void multiply_matrix_vector_di3(double v[3],
                                       THMCONST double a[3][3],
                                       const int b[3])
{
  int i;
  double tmp[3];

  for (i = 0; i < 3; i++) {
    tmp[i] = a[i][0] * b[0] + a[i][1] * b[1] + a[i][2] * b[2];
  }
  for (i = 0; i < 3; i++) {
    v[i] = tmp[i];
  }
}
/* Linear interpolation weight: fractional position of omega between
 * vertex energies m (start) and n (end). */
static double _f(const int n,
                 const int m,
                 const double omega,
                 const double vertices_omegas[4])
{
    const double numer = omega - vertices_omegas[m];
    const double denom = vertices_omegas[n] - vertices_omegas[m];
    return numer / denom;
}
/* Dispatch to the J integration weight for energy region i (0-4) and
 * sorted-corner code ci (0-3); any unhandled (i, ci) combination falls
 * through to a diagnostic and returns 0.
 * Fix: a 'break' was added after each inner switch.  Previously an
 * out-of-range ci fell through into the next region's dispatch and
 * could silently return the wrong region's weight (ultimately _J_4). */
static double _J(const int i,
                 const int ci,
                 const double omega,
                 const double vertices_omegas[4])
{
    switch (i) {
    case 0:
        return _J_0();
    case 1:
        switch (ci) {
        case 0:
            return _J_10(omega, vertices_omegas);
        case 1:
            return _J_11(omega, vertices_omegas);
        case 2:
            return _J_12(omega, vertices_omegas);
        case 3:
            return _J_13(omega, vertices_omegas);
        }
        break;
    case 2:
        switch (ci) {
        case 0:
            return _J_20(omega, vertices_omegas);
        case 1:
            return _J_21(omega, vertices_omegas);
        case 2:
            return _J_22(omega, vertices_omegas);
        case 3:
            return _J_23(omega, vertices_omegas);
        }
        break;
    case 3:
        switch (ci) {
        case 0:
            return _J_30(omega, vertices_omegas);
        case 1:
            return _J_31(omega, vertices_omegas);
        case 2:
            return _J_32(omega, vertices_omegas);
        case 3:
            return _J_33(omega, vertices_omegas);
        }
        break;
    case 4:
        return _J_4();
    }

    warning_print("******* Warning *******\n");
    warning_print(" J is something wrong. \n");
    warning_print("******* Warning *******\n");
    warning_print("(line %d, %s).\n", __LINE__, __FILE__);
    return 0;
}
/* Dispatch to the I integration weight for energy region i (0-4) and
 * sorted-corner code ci (0-3); any unhandled (i, ci) combination falls
 * through to a diagnostic and returns 0.
 * Fix: a 'break' was added after each inner switch.  Previously an
 * out-of-range ci fell through into the next region's dispatch and
 * could silently return the wrong region's weight (ultimately _I_4). */
static double _I(const int i,
                 const int ci,
                 const double omega,
                 const double vertices_omegas[4])
{
    switch (i) {
    case 0:
        return _I_0();
    case 1:
        switch (ci) {
        case 0:
            return _I_10(omega, vertices_omegas);
        case 1:
            return _I_11(omega, vertices_omegas);
        case 2:
            return _I_12(omega, vertices_omegas);
        case 3:
            return _I_13(omega, vertices_omegas);
        }
        break;
    case 2:
        switch (ci) {
        case 0:
            return _I_20(omega, vertices_omegas);
        case 1:
            return _I_21(omega, vertices_omegas);
        case 2:
            return _I_22(omega, vertices_omegas);
        case 3:
            return _I_23(omega, vertices_omegas);
        }
        break;
    case 3:
        switch (ci) {
        case 0:
            return _I_30(omega, vertices_omegas);
        case 1:
            return _I_31(omega, vertices_omegas);
        case 2:
            return _I_32(omega, vertices_omegas);
        case 3:
            return _I_33(omega, vertices_omegas);
        }
        break;
    case 4:
        return _I_4();
    }

    warning_print("******* Warning *******\n");
    warning_print(" I is something wrong. \n");
    warning_print("******* Warning *******\n");
    warning_print("(line %d, %s).\n", __LINE__, __FILE__);
    return 0;
}
/* Dispatch to the piecewise n(omega) function for energy region i (0-4).
 * Unknown regions produce a diagnostic and return 0. */
static double _n(const int i,
                 const double omega,
                 const double vertices_omegas[4])
{
    if (i == 0) {
        return _n_0();
    }
    if (i == 1) {
        return _n_1(omega, vertices_omegas);
    }
    if (i == 2) {
        return _n_2(omega, vertices_omegas);
    }
    if (i == 3) {
        return _n_3(omega, vertices_omegas);
    }
    if (i == 4) {
        return _n_4();
    }

    warning_print("******* Warning *******\n");
    warning_print(" n is something wrong. \n");
    warning_print("******* Warning *******\n");
    warning_print("(line %d, %s).\n", __LINE__, __FILE__);
    return 0;
}
/* Dispatch to the piecewise g(omega) (DOS) function for region i (0-4).
 * Unknown regions produce a diagnostic and return 0. */
static double _g(const int i,
                 const double omega,
                 const double vertices_omegas[4])
{
    if (i == 0) {
        return _g_0();
    }
    if (i == 1) {
        return _g_1(omega, vertices_omegas);
    }
    if (i == 2) {
        return _g_2(omega, vertices_omegas);
    }
    if (i == 3) {
        return _g_3(omega, vertices_omegas);
    }
    if (i == 4) {
        return _g_4();
    }

    warning_print("******* Warning *******\n");
    warning_print(" g is something wrong. \n");
    warning_print("******* Warning *******\n");
    warning_print("(line %d, %s).\n", __LINE__, __FILE__);
    return 0;
}
/* omega < omega1: nothing of the tetrahedron is filled yet. */
static double _n_0(void)
{
    return 0.0;
}
/* omega1 < omega < omega2 */
static double _n_1(const double omega,
                   const double vertices_omegas[4])
{
    const double f10 = _f(1, 0, omega, vertices_omegas);
    const double f20 = _f(2, 0, omega, vertices_omegas);
    const double f30 = _f(3, 0, omega, vertices_omegas);
    return f10 * f20 * f30;
}
/* omega2 < omega < omega3: sum of the three partial-volume terms. */
static double _n_2(const double omega,
                   const double vertices_omegas[4])
{
    const double t1 = _f(3, 1, omega, vertices_omegas) *
                      _f(2, 1, omega, vertices_omegas);
    const double t2 = _f(3, 0, omega, vertices_omegas) *
                      _f(1, 3, omega, vertices_omegas) *
                      _f(2, 1, omega, vertices_omegas);
    const double t3 = _f(3, 0, omega, vertices_omegas) *
                      _f(2, 0, omega, vertices_omegas) *
                      _f(1, 2, omega, vertices_omegas);
    return t1 + t2 + t3;
}
/* omega3 < omega < omega4 (header comment corrected: it previously
 * repeated the omega2..omega3 label of _n_2). */
static double _n_3(const double omega,
                   const double vertices_omegas[4])
{
    const double unfilled = _f(0, 3, omega, vertices_omegas) *
                            _f(1, 3, omega, vertices_omegas) *
                            _f(2, 3, omega, vertices_omegas);
    return 1.0 - unfilled;
}
/* omega4 < omega: the tetrahedron is completely filled. */
static double _n_4(void)
{
    return 1.0;
}
/* omega < omega1: no states at this energy. */
static double _g_0(void)
{
    return 0.0;
}
/* omega1 < omega < omega2 */
static double _g_1(const double omega,
                   const double vertices_omegas[4])
{
    const double span = vertices_omegas[3] - vertices_omegas[0];
    return 3 *
           _f(1, 0, omega, vertices_omegas) *
           _f(2, 0, omega, vertices_omegas) / span;
}
/* omega2 < omega < omega3 */
static double _g_2(const double omega,
                   const double vertices_omegas[4])
{
    const double span = vertices_omegas[3] - vertices_omegas[0];
    const double t1 = _f(1, 2, omega, vertices_omegas) *
                      _f(2, 0, omega, vertices_omegas);
    const double t2 = _f(2, 1, omega, vertices_omegas) *
                      _f(1, 3, omega, vertices_omegas);
    return 3 / span * (t1 + t2);
}
/* omega3 < omega < omega4 */
static double _g_3(const double omega,
                   const double vertices_omegas[4])
{
    const double span = vertices_omegas[3] - vertices_omegas[0];
    return 3 *
           _f(1, 3, omega, vertices_omegas) *
           _f(2, 3, omega, vertices_omegas) / span;
}
/* omega4 < omega: above the band, DOS vanishes. */
static double _g_4(void)
{
    return 0.0;
}
/* J weight below the band: zero. */
static double _J_0(void)
{
    return 0.0;
}
/* J weight of corner 0 for omega1 < omega < omega2. */
static double _J_10(const double omega,
                    const double vertices_omegas[4])
{
    const double f01 = _f(0, 1, omega, vertices_omegas);
    const double f02 = _f(0, 2, omega, vertices_omegas);
    const double f03 = _f(0, 3, omega, vertices_omegas);
    return (1.0 + f01 + f02 + f03) / 4;
}
/* J weight of corner 1 for omega1 < omega < omega2. */
static double _J_11(const double omega,
                    const double vertices_omegas[4])
{
    const double f10 = _f(1, 0, omega, vertices_omegas);
    return f10 / 4;
}
/* J weight of corner 2 for omega1 < omega < omega2. */
static double _J_12(const double omega,
                    const double vertices_omegas[4])
{
    const double f20 = _f(2, 0, omega, vertices_omegas);
    return f20 / 4;
}
/* J weight of corner 3 for omega1 < omega < omega2. */
static double _J_13(const double omega,
                    const double vertices_omegas[4])
{
    const double f30 = _f(3, 0, omega, vertices_omegas);
    return f30 / 4;
}
/* J weight of corner 0 for omega2 < omega < omega3, normalized by the
 * region's occupied volume _n_2.  The three additive terms mirror the
 * three terms of _n_2.
 * NOTE(review): floating-point evaluation order is left untouched for
 * bit-exact reproducibility. */
static double _J_20(const double omega,
                    const double vertices_omegas[4])
{
    return (_f(3, 1, omega, vertices_omegas) *
            _f(2, 1, omega, vertices_omegas) +
            _f(3, 0, omega, vertices_omegas) *
            _f(1, 3, omega, vertices_omegas) *
            _f(2, 1, omega, vertices_omegas) *
            (1.0 +
             _f(0, 3, omega, vertices_omegas)) +
            _f(3, 0, omega, vertices_omegas) *
            _f(2, 0, omega, vertices_omegas) *
            _f(1, 2, omega, vertices_omegas) *
            (1.0 +
             _f(0, 3, omega, vertices_omegas) +
             _f(0, 2, omega, vertices_omegas))) / 4 / _n_2(omega, vertices_omegas);
}
/* J weight of corner 1 for omega2 < omega < omega3, normalized by _n_2.
 * Term grouping parallels _J_20; evaluation order left untouched. */
static double _J_21(const double omega,
                    const double vertices_omegas[4])
{
    return (_f(3, 1, omega, vertices_omegas) *
            _f(2, 1, omega, vertices_omegas) *
            (1.0 +
             _f(1, 3, omega, vertices_omegas) +
             _f(1, 2, omega, vertices_omegas)) +
            _f(3, 0, omega, vertices_omegas) *
            _f(1, 3, omega, vertices_omegas) *
            _f(2, 1, omega, vertices_omegas) *
            (_f(1, 3, omega, vertices_omegas) +
             _f(1, 2, omega, vertices_omegas)) +
            _f(3, 0, omega, vertices_omegas) *
            _f(2, 0, omega, vertices_omegas) *
            _f(1, 2, omega, vertices_omegas) *
            _f(1, 2, omega, vertices_omegas)) / 4 / _n_2(omega, vertices_omegas);
}
/* J weight of corner 2 for omega2 < omega < omega3, normalized by _n_2.
 * Term grouping parallels _J_20; evaluation order left untouched. */
static double _J_22(const double omega,
                    const double vertices_omegas[4])
{
    return (_f(3, 1, omega, vertices_omegas) *
            _f(2, 1, omega, vertices_omegas) *
            _f(2, 1, omega, vertices_omegas) +
            _f(3, 0, omega, vertices_omegas) *
            _f(1, 3, omega, vertices_omegas) *
            _f(2, 1, omega, vertices_omegas) *
            _f(2, 1, omega, vertices_omegas) +
            _f(3, 0, omega, vertices_omegas) *
            _f(2, 0, omega, vertices_omegas) *
            _f(1, 2, omega, vertices_omegas) *
            (_f(2, 1, omega, vertices_omegas) +
             _f(2, 0, omega, vertices_omegas))) / 4 / _n_2(omega, vertices_omegas);
}
/* J weight of corner 3 for omega2 < omega < omega3, normalized by _n_2.
 * Term grouping parallels _J_20; evaluation order left untouched. */
static double _J_23(const double omega,
                    const double vertices_omegas[4])
{
    return (_f(3, 1, omega, vertices_omegas) *
            _f(2, 1, omega, vertices_omegas) *
            _f(3, 1, omega, vertices_omegas) +
            _f(3, 0, omega, vertices_omegas) *
            _f(1, 3, omega, vertices_omegas) *
            _f(2, 1, omega, vertices_omegas) *
            (_f(3, 1, omega, vertices_omegas) +
             _f(3, 0, omega, vertices_omegas)) +
            _f(3, 0, omega, vertices_omegas) *
            _f(2, 0, omega, vertices_omegas) *
            _f(1, 2, omega, vertices_omegas) *
            _f(3, 0, omega, vertices_omegas)) / 4 / _n_2(omega, vertices_omegas);
}
/* J weight of corner 0 for omega3 < omega < omega4, normalized by _n_3. */
static double _J_30(const double omega,
                    const double vertices_omegas[4])
{
    const double t = _f(0, 3, omega, vertices_omegas) *
                     _f(0, 3, omega, vertices_omegas) *
                     _f(1, 3, omega, vertices_omegas) *
                     _f(2, 3, omega, vertices_omegas);
    return (1.0 - t) / 4 / _n_3(omega, vertices_omegas);
}
/* J weight of corner 1 for omega3 < omega < omega4, normalized by _n_3. */
static double _J_31(const double omega,
                    const double vertices_omegas[4])
{
    const double t = _f(0, 3, omega, vertices_omegas) *
                     _f(1, 3, omega, vertices_omegas) *
                     _f(1, 3, omega, vertices_omegas) *
                     _f(2, 3, omega, vertices_omegas);
    return (1.0 - t) / 4 / _n_3(omega, vertices_omegas);
}
/* J weight of corner 2 for omega3 < omega < omega4, normalized by _n_3.
 * NOTE(review): this is the only _J_3x variant using '1.0 +' where its
 * siblings use '1.0 -'.  Verify against the tetrahedron-method
 * reference before assuming either reading; left byte-identical here. */
static double _J_32(const double omega,
                    const double vertices_omegas[4])
{
    return (1.0 +
            _f(0, 3, omega, vertices_omegas) *
            _f(1, 3, omega, vertices_omegas) *
            _f(2, 3, omega, vertices_omegas) *
            _f(2, 3, omega, vertices_omegas)) / 4 / _n_3(omega, vertices_omegas);
}
/* J weight of corner 3 for omega3 < omega < omega4, normalized by _n_3.
 * Evaluation order left untouched for bit-exact reproducibility. */
static double _J_33(const double omega,
                    const double vertices_omegas[4])
{
    return (1.0 -
            _f(0, 3, omega, vertices_omegas) *
            _f(1, 3, omega, vertices_omegas) *
            _f(2, 3, omega, vertices_omegas) *
            (1.0 +
             _f(3, 0, omega, vertices_omegas) +
             _f(3, 1, omega, vertices_omegas) +
             _f(3, 2, omega, vertices_omegas))) / 4 / _n_3(omega, vertices_omegas);
}
/* J weight above the band: each corner carries 1/4 of the tetrahedron. */
static double _J_4(void)
{
    return 0.25;
}
/* I weight below the band: zero. */
static double _I_0(void)
{
    return 0.0;
}
/* I weight of corner 0 for omega1 < omega < omega2. */
static double _I_10(const double omega,
                    const double vertices_omegas[4])
{
    const double f01 = _f(0, 1, omega, vertices_omegas);
    const double f02 = _f(0, 2, omega, vertices_omegas);
    const double f03 = _f(0, 3, omega, vertices_omegas);
    return (f01 + f02 + f03) / 3;
}
/* I weight of corner 1 for omega1 < omega < omega2. */
static double _I_11(const double omega,
                    const double vertices_omegas[4])
{
    const double f10 = _f(1, 0, omega, vertices_omegas);
    return f10 / 3;
}
/* I weight of corner 2 for omega1 < omega < omega2. */
static double _I_12(const double omega,
                    const double vertices_omegas[4])
{
    const double f20 = _f(2, 0, omega, vertices_omegas);
    return f20 / 3;
}
/* I weight of corner 3 for omega1 < omega < omega2. */
static double _I_13(const double omega,
                    const double vertices_omegas[4])
{
    const double f30 = _f(3, 0, omega, vertices_omegas);
    return f30 / 3;
}
/* I weight of corner 0 for omega2 < omega < omega3.  All _I_2x variants
 * share the same denominator (named 'den' here for clarity; the
 * arithmetic and its order are unchanged). */
static double _I_20(const double omega,
                    const double vertices_omegas[4])
{
    const double num = _f(0, 2, omega, vertices_omegas) *
                       _f(2, 0, omega, vertices_omegas) *
                       _f(1, 2, omega, vertices_omegas);
    const double den = _f(1, 2, omega, vertices_omegas) *
                       _f(2, 0, omega, vertices_omegas) +
                       _f(2, 1, omega, vertices_omegas) *
                       _f(1, 3, omega, vertices_omegas);
    return (_f(0, 3, omega, vertices_omegas) + num / den) / 3;
}
/* I weight of corner 1 for omega2 < omega < omega3. */
static double _I_21(const double omega,
                    const double vertices_omegas[4])
{
    const double num = _f(1, 3, omega, vertices_omegas) *
                       _f(1, 3, omega, vertices_omegas) *
                       _f(2, 1, omega, vertices_omegas);
    const double den = _f(1, 2, omega, vertices_omegas) *
                       _f(2, 0, omega, vertices_omegas) +
                       _f(2, 1, omega, vertices_omegas) *
                       _f(1, 3, omega, vertices_omegas);
    return (_f(1, 2, omega, vertices_omegas) + num / den) / 3;
}
/* I weight of corner 2 for omega2 < omega < omega3. */
static double _I_22(const double omega,
                    const double vertices_omegas[4])
{
    const double num = _f(2, 0, omega, vertices_omegas) *
                       _f(2, 0, omega, vertices_omegas) *
                       _f(1, 2, omega, vertices_omegas);
    const double den = _f(1, 2, omega, vertices_omegas) *
                       _f(2, 0, omega, vertices_omegas) +
                       _f(2, 1, omega, vertices_omegas) *
                       _f(1, 3, omega, vertices_omegas);
    return (_f(2, 1, omega, vertices_omegas) + num / den) / 3;
}
/* I weight of corner 3 for omega2 < omega < omega3. */
static double _I_23(const double omega,
                    const double vertices_omegas[4])
{
    const double num = _f(3, 1, omega, vertices_omegas) *
                       _f(1, 3, omega, vertices_omegas) *
                       _f(2, 1, omega, vertices_omegas);
    const double den = _f(1, 2, omega, vertices_omegas) *
                       _f(2, 0, omega, vertices_omegas) +
                       _f(2, 1, omega, vertices_omegas) *
                       _f(1, 3, omega, vertices_omegas);
    return (_f(3, 0, omega, vertices_omegas) + num / den) / 3;
}
/* I weight of corner 0 for omega3 < omega < omega4. */
static double _I_30(const double omega,
                    const double vertices_omegas[4])
{
    const double f03 = _f(0, 3, omega, vertices_omegas);
    return f03 / 3;
}
/* I weight of corner 1 for omega3 < omega < omega4. */
static double _I_31(const double omega,
                    const double vertices_omegas[4])
{
    const double f13 = _f(1, 3, omega, vertices_omegas);
    return f13 / 3;
}
/* I weight of corner 2 for omega3 < omega < omega4. */
static double _I_32(const double omega,
                    const double vertices_omegas[4])
{
    const double f23 = _f(2, 3, omega, vertices_omegas);
    return f23 / 3;
}
/* I weight of corner 3 for omega3 < omega < omega4. */
static double _I_33(const double omega,
                    const double vertices_omegas[4])
{
    const double f30 = _f(3, 0, omega, vertices_omegas);
    const double f31 = _f(3, 1, omega, vertices_omegas);
    const double f32 = _f(3, 2, omega, vertices_omegas);
    return (f30 + f31 + f32) / 3;
}
/* I weight above the band: zero. */
static double _I_4(void)
{
    return 0.0;
}
|
/* NOTE(review): the "silo-check.c |" prefix on the next line is a
 * dataset-separator artifact of the extraction, not C source. */
silo-check.c | int main() {
    /* One OpenMP Jacobi relaxation sweep over a 500x500 grid followed by
     * a max-difference scan.
     * NOTE(review): my_diff, malloc and fabs are used without visible
     * declarations; presumably my_diff is a file-scope double and the
     * usual headers are included above this chunk -- confirm in the
     * full file. */
    double **u;
    double **w;
    unsigned long int _imopVarPre147;
    void *_imopVarPre148;
    _imopVarPre147 = 500 * sizeof(double *);
    _imopVarPre148 = malloc(_imopVarPre147);
    u = (double **) _imopVarPre148;
    unsigned long int _imopVarPre151;
    void *_imopVarPre152;
    _imopVarPre151 = 500 * sizeof(double *);
    _imopVarPre152 = malloc(_imopVarPre151);
    w = (double **) _imopVarPre152;
    int p;
    /* allocate one 500-element row per grid line; malloc results are not
     * checked and nothing is freed (NOTE(review): leaks/possible NULL
     * deref -- likely acceptable for a benchmark driver). */
    for (p = 0; p < 500; p++) {
        unsigned long int _imopVarPre155;
        void *_imopVarPre156;
        _imopVarPre155 = 500 * sizeof(double);
        _imopVarPre156 = malloc(_imopVarPre155);
        u[p] = (double *) _imopVarPre156;
        unsigned long int _imopVarPre159;
        void *_imopVarPre160;
        _imopVarPre159 = 500 * sizeof(double);
        _imopVarPre160 = malloc(_imopVarPre159);
        w[p] = (double *) _imopVarPre160;
    }
#pragma omp parallel
    {
        int i, j;
        /* averaging sweep over the interior; u is only read here */
#pragma omp for nowait
        for (i = 1; i < 500 - 1; i++) {
            for (j = 1; j < 500 - 1; j++) {
                w[i][j] =
                    (u[i - 1][j] + u[i + 1][j] + u[i][j - 1] + u[i][j + 1])
                    / 4.0;
            }
        }
#pragma omp barrier
        /* NOTE(review): every thread writes my_diff = 0.0 (no single/
         * master), and the unsynchronized check-then-write max update
         * below races on my_diff.  If this file is a race-checker
         * benchmark input that is intentional; otherwise it needs a
         * max reduction or a critical section. */
        my_diff = 0.0;
#pragma omp for nowait
        for (i = 1; i < 500 - 1; i++) {
            for (j = 1; j < 500 - 1; j++) {
                double _imopVarPre167;
                double _imopVarPre168;
                _imopVarPre167 = w[i][j] - u[i][j];
                _imopVarPre168 = fabs(_imopVarPre167);
                if (my_diff < _imopVarPre168) {
                    double _imopVarPre170;
                    double _imopVarPre171;
                    _imopVarPre170 = w[i][j] - u[i][j];
                    _imopVarPre171 = fabs(_imopVarPre170);
                    my_diff = _imopVarPre171;
                }
            }
        }
    }
}
|
DRB084-threadprivatemissing-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A file-scope variable used within a function called by a parallel region.
No threadprivate is used to avoid data races.
Data race pairs sum0@61:3 vs. sum0@61:8
sum0@61:3 vs. sum0@61:3
*/
#include <stdio.h>
#include <assert.h>
#include <omp.h>
int sum0 = 0;
int sum1 = 0;
//#pragma omp threadprivate(sum0)
/* Adds i into the file-scope accumulator sum0. */
void foo(int i)
{
    sum0 = sum0 + i; /* unsynchronized read-modify-write; racy when called from parallel code */
}
int main()
{
    int i;
    int sum = 0;
    /* NOTE(review): this loop is serial as shown, yet the file header
     * documents a data race inside foo(), which requires a parallel
     * caller -- the expected "#pragma omp parallel for" may have been
     * lost in extraction; confirm against the original DRB084 source. */
    for (i = 1; i <= 1000; i += 1) {
        foo(i);
    }
    sum = sum + sum0;
    /* reference calculation */
#pragma omp parallel for private (i) reduction (+:sum1)
    for (i = 1; i <= 1000; i += 1) {
        sum1 = sum1 + i;
    }
    printf("sum=%d; sum1=%d\n",sum,sum1);
    // assert(sum==sum1);
    return 0;
}
|
type_conversions.h | /*
* This set of methods provides dataType conversions in all possible directions supported:
* FP8, FP16, FLOAT, DOUBLE, INT8, UINT8, UINT16,
*
* @author raver119@gmail.com
*/
#ifndef LIBND4J_TYPE_CONVERSIONS_H
#define LIBND4J_TYPE_CONVERSIONS_H
#define ND4J_FLOAT8 0
#define ND4J_INT8 1
#define ND4J_UINT8 2
#define ND4J_FLOAT16 3
#define ND4J_INT16 4
#define ND4J_UINT16 5
#define ND4J_FLOAT32 6
#define ND4J_DOUBLE 7
#define ND4J_THRESHOLD 8
#define ND4J_FLOAT24 119 // not supported after all. might want to add support later.
#include <ops/ops.h>
#include <templatemath.h>
#include <types/float16.h>
#include <types/float8.h>
#include <types/uint8.h>
#include <types/int8.h>
#include <types/int16.h>
#include <types/uint16.h>
/* Type-punning helper: stores/recovers a 32-bit float through an int
 * slot (used for the threshold value embedded in the int header of the
 * compressed buffers). */
typedef union
{
    float f_;
    int i_;
} FloatBits;
#ifdef __CUDACC__
/* Device-side element-wise type conversion: z[i] = (T)(float) x[i].
 * Loop advances by blockDim.x * gridDim.x so any grid size covers all N
 * elements; every value round-trips through float by design. */
template<typename S, typename T>
__device__ inline void convertKernelGeneric(void *dx, Nd4jIndex N, void *dz) {
    S *x = reinterpret_cast<S *> (dx);
    T *z = reinterpret_cast<T *> (dz);

    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (Nd4jIndex i = tid; i < N; i+= blockDim.x * gridDim.x) {
        z[i] = (T) ((float) x[i]);
    }
};
/*
 * PLEASE NOTE: This kernel doesn't allow loop for data. Basically: grid will be huge.
 */
/* Phase 1 of threshold encoding: count how many elements satisfy
 * |x| >= threshold.  __syncthreads_count aggregates the per-thread
 * predicate across the block; thread 0 stores the block count at
 * z[blockIdx.x + 1] and atomically adds it to the running total z[0]. */
template<typename T>
__device__ inline void encoderKernelP1Generic(void *dx, Nd4jIndex N, void *dz, float threshold) {
    T *x = reinterpret_cast<T *> (dx);
    int *z = reinterpret_cast<int *> (dz);

    //basically, for phase One we want do calculation: how many eligible values we have, and which blocks will be holding data
    Nd4jIndex tid = blockIdx.x * blockDim.x + threadIdx.x;
    int pass = tid < N && nd4j::math::nd4j_abs<T>(x[tid]) >= (T) threshold ? 1 : 0;

    int bp=__syncthreads_count(pass);

    if (threadIdx.x == 0) {
        // saving out per-block passes
        z[blockIdx.x+1] = bp;

        // saving out sum
        atomicAdd(&z[0], bp);
    }
}
/* 2^e as an int.  NOTE(review): shifting by e >= 31 would be undefined;
 * the only caller here uses j <= 5. */
__device__ __inline__ int pow2i (int e){
    return 1<<e;
}
/* Shared-memory bank-conflict padding for the scan kernels: indices are
 * offset by index >> LOG_NUM_BANKS so consecutive threads land in
 * different banks.
 * NOTE(review): NUM_BANKS is 32 but LOG_NUM_BANKS is 4 (2^4 = 16) --
 * inconsistent; also the ZERO_BANK_CONFLICTS variant relies on '>>'
 * binding looser than '+', so it actually computes
 * index >> (LOG_NUM_BANKS + ...).  Verify both against the original
 * CUDA scan sample before enabling that path. */
#define NUM_BANKS 32
#define LOG_NUM_BANKS 4

// Define this to more rigorously avoid bank conflicts, even at the lower (root) levels of the tree
//#define ZERO_BANK_CONFLICTS

#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2 * LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif

#ifdef CHECK_BANK_CONFLICTS
#define TEMP(index) CUT_BANK_CHECKER(temp, index)
#else
#define TEMP(index) temp[index]
#endif
/* True iff n is a positive integral power of two.
 * Fix: the previous bare (n & (n-1)) == 0 test also accepted 0 and
 * INT_MIN; the n > 0 guard excludes them.  Marked static so this
 * header-defined function has internal linkage in every TU. */
static inline bool
isPowerOfTwo(int n)
{
    return n > 0 && (n & (n - 1)) == 0;
}
/* Largest power of two <= n for n >= 1; returns 0 for n < 1.
 * Fix: replaced the float tricks (logb on WIN32, frexp elsewhere) with
 * plain integer halving -- identical results for positive n, no
 * <math.h> dependency, and no undefined 1 << -1 for n == 0 (frexp(0)
 * yields exp == 0 in the old code).  Marked static for internal
 * linkage as a header-defined function. */
static inline int
floorPow2(int n)
{
    int p = 1;
    if (n < 1) {
        return 0;
    }
    while (n > 1) {
        n >>= 1;
        p <<= 1;
    }
    return p;
}
/* Load one block window (2 * blockDim.x ints) from global memory into
 * shared memory with bank-conflict padding.  Returns (by reference) the
 * padded shared indices ai/bi, the global indices mem_ai/mem_bi and the
 * two bank offsets so the caller can mirror the layout when storing.
 * isNP2 ("n is not a power of two"): zero-pad the upper half past n. */
template <bool isNP2>
__device__ void loadSharedChunkFromMem(int *s_data, const int *g_idata, int n, int baseIndex, int& ai, int& bi, int& mem_ai, int& mem_bi, int& bankOffsetA, int& bankOffsetB) {
    int thid = threadIdx.x;
    mem_ai = baseIndex + threadIdx.x;
    mem_bi = mem_ai + blockDim.x;

    ai = thid;
    bi = thid + blockDim.x;

    // compute spacing to avoid bank conflicts
    bankOffsetA = CONFLICT_FREE_OFFSET(ai);
    bankOffsetB = CONFLICT_FREE_OFFSET(bi);

    // Cache the computational window in shared memory
    // pad values beyond n with zeros
    s_data[ai + bankOffsetA] = g_idata[mem_ai];

    if (isNP2) { // compile-time decision
        s_data[bi + bankOffsetB] = (bi < n) ? g_idata[mem_bi] : 0;
    } else {
        s_data[bi + bankOffsetB] = g_idata[mem_bi];
    }
}
/* Mirror of loadSharedChunkFromMem: write the scanned shared-memory
 * window back to global memory.  When isNP2, the upper half is stored
 * only for indices below n. */
template <bool isNP2>
__device__ void storeSharedChunkToMem(int* g_odata, int* s_data, int n, int ai, int bi, int mem_ai, int mem_bi, int bankOffsetA, int bankOffsetB) {
    __syncthreads();

    // write results to global memory
    g_odata[mem_ai] = s_data[ai + bankOffsetA];
    if (isNP2) { // compile-time decision
        if (bi < n)
            g_odata[mem_bi] = s_data[bi + bankOffsetB];
    } else {
        g_odata[mem_bi] = s_data[bi + bankOffsetB];
    }
}
/* Thread 0 only: optionally record this block's total (the last element
 * of the padded shared array) into g_blockSums, then zero that element
 * so the down-sweep produces an exclusive scan. */
template <bool storeSum>
__device__ void clearLastElement(int* s_data, int *g_blockSums, int blockIndex) {
    if (threadIdx.x == 0)
    {
        int index = (blockDim.x << 1) - 1;
        index += CONFLICT_FREE_OFFSET(index);

        if (storeSum) { // compile-time decision
            // write this block's total sum to the corresponding index in the blockSums array
            g_blockSums[blockIndex] = s_data[index];
        }

        // zero the last element in the scan so it will propagate back to the front
        s_data[index] = 0;
    }
}
/* Up-sweep (reduce) phase of the work-efficient scan: builds partial
 * sums in place up the implicit binary tree over shared memory.
 * Returns the stride reached after the last level, which seeds the
 * down-sweep in scanRootToLeaves. */
__device__ unsigned int buildSum(int *s_data) {
    unsigned int thid = threadIdx.x;
    unsigned int stride = 1;

    // build the sum in place up the tree
    for (int d = blockDim.x; d > 0; d >>= 1) {
        __syncthreads();

        if (thid < d) {
            int i = __mul24(__mul24(2, stride), thid);
            int ai = i + stride - 1;
            int bi = ai + stride;

            ai += CONFLICT_FREE_OFFSET(ai);
            bi += CONFLICT_FREE_OFFSET(bi);

            s_data[bi] += s_data[ai];
        }

        stride *= 2;
    }

    return stride;
}
/* Down-sweep phase of the work-efficient exclusive scan over shared
 * memory: walks back down the tree, swapping and accumulating partials.
 * Fix: the swap temporary was declared float although s_data holds
 * ints; values above 2^24 would silently lose precision in the swap.
 * It is now an int. */
__device__ void scanRootToLeaves(int *s_data, unsigned int stride) {
    unsigned int thid = threadIdx.x;

    // traverse down the tree building the scan in place
    for (int d = 1; d <= blockDim.x; d *= 2) {
        stride >>= 1;

        __syncthreads();

        if (thid < d) {
            int i = __mul24(__mul24(2, stride), thid);
            int ai = i + stride - 1;
            int bi = ai + stride;

            ai += CONFLICT_FREE_OFFSET(ai);
            bi += CONFLICT_FREE_OFFSET(bi);

            int t = s_data[ai];
            s_data[ai] = s_data[bi];
            s_data[bi] += t;
        }
    }
}
/* One block's complete exclusive scan over shared memory: up-sweep
 * (buildSum), record/clear the block total (clearLastElement), then
 * down-sweep.  blockIndex == 0 means "use blockIdx.x" as the slot in
 * the block-sums array. */
template <bool storeSum>
__device__ void prescanBlock(int *data, int blockIndex, int *blockSums) {
    int stride = buildSum(data); // build the sum in place up the tree
    clearLastElement<storeSum>(data, blockSums,
                               (blockIndex == 0) ? blockIdx.x : blockIndex);
    scanRootToLeaves(data, stride); // traverse down tree to build the scan
}
/* Scan kernel: load a window of g_idata into dynamic shared memory,
 * run the block-local exclusive scan, and store the result to g_odata
 * (optionally recording per-block totals in g_blockSums).
 * baseIndex == 0 derives the window from blockIdx. */
template <bool storeSum, bool isNP2>
__global__ void prescan(int *g_odata, const int *g_idata, int *g_blockSums, int n, int blockIndex, int baseIndex) {
    int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB;
    extern __shared__ int s_data[];

    // load data into shared memory
    loadSharedChunkFromMem<isNP2>((int *) s_data, g_idata, n, (baseIndex == 0) ? __mul24(blockIdx.x, (blockDim.x << 1)):baseIndex, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB);

    // scan the data in each block
    prescanBlock<storeSum>(s_data, blockIndex, g_blockSums);

    // write results to device memory
    storeSharedChunkToMem<isNP2>(g_odata, s_data, n, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB);
}
/* Adds the per-block scan offset uniforms[blockIdx.x + blockOffset] to
 * the two int elements owned by each thread.  The second add is masked
 * by (cond) * uni so out-of-range upper elements receive 0.
 * Fix: 'uni' was declared __shared__ float although both uniforms and
 * g_data are int; the int->float->int round-trip loses precision above
 * 2^24 and forced the masked add into float.  'uni' is now an int. */
__global__ void uniformAdd(int *g_data, int *uniforms, int n, int blockOffset, int baseIndex) {
    __shared__ int uni;
    if (threadIdx.x == 0)
        uni = uniforms[blockIdx.x + blockOffset];

    unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;

    __syncthreads();

    // note two adds per thread
    g_data[address] += uni;
    g_data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
}
/*
 * This kernel does prefix sum in parallel, to calculate offsets for each block
 */
/* Stub kept for API symmetry: block offsets are produced by the
 * prescan/uniformAdd kernels instead. */
template<typename T>
__device__ inline void encoderKernelP2Generic(void *dx, Nd4jIndex n, void *dz) {
    // TODO: to be remove
}
/*
 * PLEASE NOTE: This kernel doesn't allow loop for data. Basically: grid will be huge.
 *
 * Based on: https://github.com/knotman90/cuStreamComp <-- efficient CUDA stream compaction algorithm
 */
/* Phase 3 of threshold encoding: warp-ballot stream compaction.  Each
 * surviving element (|x| >= threshold) gets a dense output slot from
 * its intra-warp rank (t_u), its warp's exclusive offset within the
 * block (warpTotals) and the block's global offset (offsets[blockIdx.x]),
 * all after the 4-int header.  Kept values are emitted as signed
 * 1-based indices in z, and x is shrunk toward zero by threshold. */
template<typename T>
__device__ inline void encoderKernelP3Generic(void *dx, int *offsets, Nd4jIndex N, void *dz) {
    T *x = reinterpret_cast<T *> (dx);
    int *z = reinterpret_cast<int *> (dz);

    Nd4jIndex tid = blockIdx.x * blockDim.x + threadIdx.x;
    extern __shared__ int warpTotals[];

    // fetch block offset only once
    __shared__ float threshold;
    __shared__ FloatBits fb;
    __shared__ int bo;
    __shared__ int limit;
    if (threadIdx.x == 0) {
        limit = z[0];
        fb.i_ = z[2];
        threshold = fb.f_;
        bo = offsets[blockIdx.x];
    }
    __syncthreads();

    if (tid < N) {
        T value = x[tid];
        int pred = nd4j::math::nd4j_abs<T>(value) >= (T) threshold ? 1 : 0;
        int w_i = threadIdx.x/warpSize; //warp index
        int w_l = tid % warpSize;//thread index within a warp
        int t_m = INT_MAX >> (warpSize-w_l-1); //thread mask (ERROR IN THE PAPER minus one is required)
        int b = __ballot(pred) & t_m; //balres = number whose ith bit isone if the ith's thread pred is true masked up to the current index in warp
        int t_u = __popc(b); // popc count the number of bit one. simply count the number predicated true BEFORE MY INDEX

        // last lane of each warp publishes the warp's survivor count
        if(w_l==warpSize-1){
            warpTotals[w_i]=t_u+pred;
        }
        __syncthreads();

        // warp 0 converts the per-warp counts into exclusive offsets,
        // one ballot per bit of the (<= 6-bit) counts
        if(w_i==0 && w_l<blockDim.x/warpSize){
            int w_i_u=0;
            for(int j=0;j<=5;j++){
                int b_j =__ballot( warpTotals[w_l] & pow2i(j) ); //# of the ones in the j'th digit of the warp offsets
                w_i_u += (__popc(b_j & t_m)  ) << j;
                //printf("indice %i t_m=%i,j=%i,b_j=%i,w_i_u=%i\n",w_l,t_m,j,b_j,w_i_u);
            }
            warpTotals[w_l]=w_i_u;
        }
        __syncthreads();

        if(pred){
            int idx = t_u + warpTotals[w_i] + bo + 4;
            if (idx < limit + 4) {
                z[idx]= value > (T) 0.0f ? tid+1 : -(tid + 1);
                x[tid] = value > (T) 0.0f ? x[tid] - threshold : x[tid] + threshold;
            }
        }
    }
}
/*
 * This kernel handles decode from sparse threshold array, to dense array
 *
 * PLEASE NOTE: Z is expected to be memset to 0
 */
/* Header ints at x[0..2]: element count, original length, threshold as
 * float bits.  Entries x[4..4+limit) are signed 1-based indices; each
 * adds +/- threshold to the dense output in a grid-stride loop. */
template<typename T>
__device__ inline void decoderKernelGeneric(void *dx, Nd4jIndex N, void *dz) {
    int *x = reinterpret_cast<int *> (dx);
    T *z = reinterpret_cast<T *> (dz);

    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    __shared__ float threshold;
    __shared__ int limit;

    __shared__ FloatBits fb;
    if (threadIdx.x == 0) {
        limit = x[0];
        fb.i_ = x[2];
        threshold = fb.f_;
    }
    __syncthreads();

    for (int e = tid; e < limit; e += blockDim.x * gridDim.x) {
        int el = x[e+4];
        int ael = nd4j::math::nd4j_abs<int>(el) - 1;

        // TODO: investigate, if += would work better here, as in "decoded accumulation"
        z[ael] += el > 0 ? threshold : -threshold;
    }
}
/* Decode a bitmap-compressed buffer onto dz.  Each int at
 * x[i / 16 + 4] describes a group of 16 values: bit b set means value b
 * receives +/- threshold (sign in bit b + 16); a sign bit alone means
 * -threshold / 2.  Values are staged through dynamic shared memory and
 * one lane per 16-element group applies the bits.
 * Fix: removed the unused local 'lim' (computed, never read). */
template<typename T>
__device__ inline void cudaDecodeBitmapGeneric(void *dx, Nd4jIndex N, T *dz) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    __shared__ T *shmem;
    __shared__ FloatBits fb;
    __shared__ float threshold;
    __shared__ int *x;

    if (threadIdx.x == 0){
        extern __shared__ char mem[];
        shmem = (T*) mem;
        x = (int *)dx;
        fb.i_ = x[2];
        threshold = fb.f_;
    }
    __syncthreads();

    for (int i = tid; i < N; i += blockDim.x * gridDim.x) {
        int byteId = i / 16 + 4;
//        printf("I: [%i]; byteId: [%i]\n", i, byteId);

        shmem[threadIdx.x] = dz[i];
        __syncthreads();

        if (threadIdx.x % 16 == 0) {
            int byte = x[byteId];

            for (int e = 0; e < 16; e++) {
                if (i + e >= N)
                    continue;

                int bitId = (i + e) % 16;

                bool hasBit = (byte & 1 << (bitId) ) != 0;
                bool hasSign = (byte & 1 << (bitId + 16) ) != 0;

                if (hasBit) {
                    if (hasSign)
                        shmem[threadIdx.x + bitId] -= threshold;
                    else
                        shmem[threadIdx.x + bitId] += threshold;
                } else if (hasSign) {
                    shmem[threadIdx.x + bitId] -= threshold / 2;
                }
            }
        }
        __syncthreads();

        dz[i] = shmem[threadIdx.x];
    }
}
/* Bitmap encoding: each group of 16 values maps to one int at
 * dz[i / 16 + 4].  Bit b set -> element b exceeded threshold (its sign
 * recorded in bit b + 16, value shrunk toward zero by threshold); a
 * sign bit alone marks a negative value in (-threshold, -threshold/2],
 * nudged up by threshold / 2.  The count of touched elements is
 * accumulated per block and atomically added to *scalar.
 * Fix: deleted the large commented-out legacy block that restated the
 * same logic and could only drift out of date. */
template<typename T>
__device__ inline void cudaEncodeBitmapGeneric(T *dx, Nd4jIndex N, int *dz, int *scalar, int *reductionBuffer, float threshold) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    __shared__ int counter;
    __shared__ int *shmem;
    __shared__ T *vals;
    if (threadIdx.x == 0){
        extern __shared__ char mem[];
        shmem = (int*) mem;
        vals = (T *) (shmem + blockDim.x);
        counter = 0;
    }
    __syncthreads();

    for (int i = tid; i < N; i += blockDim.x * gridDim.x) {
        // all threads in block reading stuff
        T val = dx[i];
        T abs = nd4j::math::nd4j_abs<T>(val);

        int byteId = i / 16 + 4;
        int bitId = i % 16;

        shmem[threadIdx.x] = 0;
        vals[threadIdx.x] = val;

        if (abs >= (T) threshold) {
            shmem[threadIdx.x] = 1 << (bitId);
            atomicAdd(&counter, 1);
            if (val < (T) 0.0f) {
                shmem[threadIdx.x] |= 1 << (bitId + 16);
                vals[threadIdx.x] += (T) threshold;
            } else {
                vals[threadIdx.x] -= (T) threshold;
            }
        } else if (abs >= (T) threshold / (T) 2.0f && val < (T) 0.0f) {
            atomicAdd(&counter, 1);
            shmem[threadIdx.x] = 1 << (bitId + 16);

            vals[threadIdx.x] += (T) threshold / (T) 2.0f;
        }
        __syncthreads();

        // one lane per 16-element group ORs the group's bits together
        if (threadIdx.x % 16 == 0) {
            int byte = 0;
            for (int e = 0; e < 16; e++) {
                if (i + e >= N)
                    continue;

                byte |= shmem[threadIdx.x + e];
            }
            dz[byteId] = byte;
        }
        __syncthreads();

        dx[i] = vals[threadIdx.x];
    }
    __syncthreads();

    if (threadIdx.x == 0) {
        atomicAdd(scalar, counter);
    }
}
/* Explicit C-linkage kernel entry points: one thin wrapper per element
 * type (float / double / float16) around each templated device routine,
 * so the host runtime can launch them by name. */
extern "C" __global__ void cudaEncodeBitmapFloat(float *dx, Nd4jIndex N, int *dz, int *scalar, int *reductionBuffer, float threshold) {
    cudaEncodeBitmapGeneric<float>(dx, N, dz, scalar, reductionBuffer, threshold);
}

extern "C" __global__ void cudaEncodeBitmapDouble(double *dx, Nd4jIndex N, int *dz, int *scalar, int *reductionBuffer, float threshold) {
    cudaEncodeBitmapGeneric<double>(dx, N, dz, scalar, reductionBuffer, threshold);
}

extern "C" __global__ void cudaEncodeBitmapHalf(float16 *dx, Nd4jIndex N, int *dz, int *scalar, int *reductionBuffer, float threshold) {
    cudaEncodeBitmapGeneric<float16>(dx, N, dz, scalar, reductionBuffer, threshold);
}

extern "C" __global__ void cudaDecodeBitmapFloat(void *dx, Nd4jIndex N, float *dz) {
    cudaDecodeBitmapGeneric<float>(dx, N, dz);
}

extern "C" __global__ void cudaDecodeBitmapDouble(void *dx, Nd4jIndex N, double *dz) {
    cudaDecodeBitmapGeneric<double>(dx, N, dz);
}

extern "C" __global__ void cudaDecodeBitmapHalf(void *dx, Nd4jIndex N, float16 *dz) {
    cudaDecodeBitmapGeneric<float16>(dx, N, dz);
}

extern "C" __global__ void encoderKernelP1Float(void *dx, Nd4jIndex N, void *dz, float threshold) {
    encoderKernelP1Generic<float>(dx, N, dz, threshold);
}

extern "C" __global__ void encoderKernelP1Double(void *dx, Nd4jIndex N, void *dz, float threshold) {
    encoderKernelP1Generic<double>(dx, N, dz, threshold);
}

extern "C" __global__ void encoderKernelP1Half(void *dx, Nd4jIndex N, void *dz, float threshold) {
    encoderKernelP1Generic<float16>(dx, N, dz, threshold);
}

extern "C" __global__ void encoderKernelP2Float(int *dx, Nd4jIndex N, int *dz) {
    encoderKernelP2Generic<float>(dx, N, dz);
}

extern "C" __global__ void encoderKernelP3Float(void *dx, int *offsets, Nd4jIndex N, void *dz) {
    encoderKernelP3Generic<float>(dx, offsets, N, dz);
}

extern "C" __global__ void encoderKernelP3Double(void *dx, int *offsets, Nd4jIndex N, void *dz) {
    encoderKernelP3Generic<double>(dx, offsets, N, dz);
}

extern "C" __global__ void encoderKernelP3Half(void *dx, int *offsets, Nd4jIndex N, void *dz) {
    encoderKernelP3Generic<float16>(dx, offsets, N, dz);
}

extern "C" __global__ void decoderKernelFloat(void *dx, Nd4jIndex N, void *dz) {
    decoderKernelGeneric<float>(dx, N, dz);
}

extern "C" __global__ void decoderKernelDouble(void *dx, Nd4jIndex N, void *dz) {
    decoderKernelGeneric<double>(dx, N, dz);
}

extern "C" __global__ void decoderKernelHalf(void *dx, Nd4jIndex N, void *dz) {
    decoderKernelGeneric<float16>(dx, N, dz);
}
#endif
/* Host-side element-wise conversion: z[i] = (T)(float) x[i].
 * Large arrays (N >= 8000) are processed in parallel; small ones use a
 * single SIMD loop to avoid thread-spawn overhead. */
template<typename S, typename T>
void convertGeneric(void *dx, Nd4jIndex N, void *dz) {
    S *src = reinterpret_cast<S *> (dx);
    T *dst = reinterpret_cast<T *> (dz);

    if (N >= 8000) {
#pragma omp parallel for
        for (int e = 0; e < N; e++) {
            dst[e] = (T) ((float) src[e]);
        }
    } else {
#pragma omp simd
        for (int e = 0; e < N; e++) {
            dst[e] = (T) ((float) src[e]);
        }
    }
};
/* Host-side sparse threshold encoding.  Output int header: z[0] =
 * capacity (max encoded elements), z[1] = original length N, z[2] =
 * threshold reinterpreted as float bits; a payload of signed 1-based
 * indices starts at z[4].  Encoded elements are shrunk toward zero by
 * threshold in place.
 * NOTE(review): the previous comment claimed a 3-int / 12-byte header,
 * but cnt and flimit below clearly use a 4-int header. */
template <typename T>
void convertToThreshold(void *dx, Nd4jIndex N, void *dz) {
    // we suppose that first 4 bytes are integer, second 4 bytes are float
    // integer: enc length
    // integer: dec length
    // float: threshold
    FloatBits fb;
    T *x = (T *) dx;
    int *z = (int *) dz;
    int limit = z[0];
    fb.i_ = z[2];
    float threshold = fb.f_;

    // FIXME: int limit is sad thing here, 2B elements limitation
    z[1] = (int) N;

    // payload begins at int index 4, right after the 4-int header
    int flimit = limit + 4;
    volatile int cnt = 4;        // next free payload slot, shared across threads
    volatile bool flag = false;  // set once the payload buffer is full

#pragma omp parallel for schedule(guided) default(shared)
    for (int e = 0; e < N; e++) {
        bool flag_load;
        // atomic read: stop early once another thread filled the buffer
#pragma omp atomic read
        flag_load = flag;
        if (flag_load)
            continue;

        T cUpd = x[e];
        if (cUpd >= (T) threshold) {
            int idx;
            // atomically claim the next output slot
#pragma omp atomic capture
            idx = cnt++;

            if (idx >= flimit) {
#pragma omp atomic write
                flag = true;
                continue;
            }

            z[idx] = e + 1;
            x[e] -= (T) threshold;
        } else if (cUpd <= (T) -threshold) {
            int idx;
#pragma omp atomic capture
            idx = cnt++;

            if (idx >= flimit) {
#pragma omp atomic write
                flag = true;
                continue;
            }

            z[idx] = -e - 1;
            x[e] += (T) threshold;
        }
    }
}
// Decode a threshold-compressed buffer produced by convertToThreshold back
// into a dense vector: each stored entry is a signed 1-based index; its sign
// selects whether `threshold` is added to or subtracted from that element.
// Header layout (int slots): x[0] = entry count, x[2] = threshold bits;
// payload begins at slot 4.
template <typename T>
void convertFromThreshold(void *dx, Nd4jIndex N, void *dz) {
    FloatBits fb;
    T *z = (T *) dz;
    int *x = (int *) dx;
    int limit = x[0];
    fb.i_ = x[2];
    float threshold = fb.f_;
    int flimit = limit + 4;
    // Entries reference distinct elements, so parallel updates don't collide.
#pragma omp parallel for schedule(guided)
    for (int slot = 4; slot < flimit; slot++) {
        const int stored = x[slot];
        const int target = nd4j::math::nd4j_abs<int>(stored) - 1;
        if (stored > 0)
            z[target] += threshold;
        else
            z[target] -= threshold;
    }
}
/*
* TypeDef:
* void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer x, long N, int dstType, Nd4jPointer z);
*/
// Dispatch table for buffer type conversion: picks the convertGeneric<S,T>
// (or threshold codec) instantiation matching (srcType, dstType) and runs it
// over the N elements at x, writing to z. Unsupported pairs print a message
// and do nothing. ND4J_FLOAT24 branches are intentionally empty (silently
// ignored). NOTE(review): same-type handling is inconsistent — double->double
// is a silent no-op, float16->float16 actually copies, int8->int8 is
// commented out, and float32->float32 falls through to the "Unsupported"
// printf; confirm which behavior callers expect.
void NativeOps::convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer x, Nd4jIndex N, int dstType, Nd4jPointer z) {
    void *dx = reinterpret_cast<void *> (x);
    void *dz = reinterpret_cast<void *> (z);
    if (srcType == ND4J_FLOAT8) {
        if (dstType == ND4J_FLOAT8) {
            // convertGeneric<double, nd4j::float8>(dx, N, dz);
        } else if (dstType == ND4J_INT8) {
            convertGeneric<nd4j::float8, nd4j::int8>(dx, N, dz);
        } else if (dstType == ND4J_UINT8) {
            convertGeneric<nd4j::float8, nd4j::uint8>(dx, N, dz);
        } else if (dstType == ND4J_FLOAT16) {
            convertGeneric<nd4j::float8, float16>(dx, N, dz);
        } else if (dstType == ND4J_INT16) {
            convertGeneric<nd4j::float8, nd4j::int16>(dx, N, dz);
        } else if (dstType == ND4J_UINT16) {
            convertGeneric<nd4j::float8, nd4j::uint16>(dx, N, dz);
        } else if (dstType == ND4J_FLOAT24) {
        } else if (dstType == ND4J_FLOAT32) {
            convertGeneric<nd4j::float8, float>(dx, N, dz);
        } else if (dstType == ND4J_DOUBLE) {
            convertGeneric<nd4j::float8, double>(dx, N, dz);
        } else {
            printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
        }
    } else if (srcType == ND4J_INT8) {
        if (dstType == ND4J_FLOAT8) {
            convertGeneric<nd4j::int8, nd4j::float8>(dx, N, dz);
        } else if (dstType == ND4J_INT8) {
            //convertGeneric<nd4j::int8, nd4j::int8>(dx, N, dz);
        } else if (dstType == ND4J_UINT8) {
            convertGeneric<nd4j::int8, nd4j::uint8>(dx, N, dz);
        } else if (dstType == ND4J_FLOAT16) {
            convertGeneric<nd4j::int8, float16>(dx, N, dz);
        } else if (dstType == ND4J_INT16) {
            convertGeneric<nd4j::int8, nd4j::int16>(dx, N, dz);
        } else if (dstType == ND4J_UINT16) {
            convertGeneric<nd4j::int8, nd4j::uint16>(dx, N, dz);
        } else if (dstType == ND4J_FLOAT24) {
        } else if (dstType == ND4J_FLOAT32) {
            convertGeneric<nd4j::int8, float>(dx, N, dz);
        } else if (dstType == ND4J_DOUBLE) {
            convertGeneric<nd4j::int8, double>(dx, N, dz);
        } else {
            printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
        }
    } else if (srcType == ND4J_UINT8) {
        if (dstType == ND4J_FLOAT8) {
            convertGeneric<nd4j::uint8, nd4j::float8>(dx, N, dz);
        } else if (dstType == ND4J_INT8) {
            convertGeneric<nd4j::uint8, nd4j::int8>(dx, N, dz);
        } else if (dstType == ND4J_UINT8) {
            convertGeneric<nd4j::uint8, nd4j::uint8>(dx, N, dz);
        } else if (dstType == ND4J_FLOAT16) {
            convertGeneric<nd4j::uint8, float16>(dx, N, dz);
        } else if (dstType == ND4J_INT16) {
            convertGeneric<nd4j::uint8, nd4j::int16>(dx, N, dz);
        } else if (dstType == ND4J_UINT16) {
            convertGeneric<nd4j::uint8, nd4j::uint16>(dx, N, dz);
        } else if (dstType == ND4J_FLOAT24) {
        } else if (dstType == ND4J_FLOAT32) {
            convertGeneric<nd4j::uint8, float>(dx, N, dz);
        } else if (dstType == ND4J_DOUBLE) {
            convertGeneric<nd4j::uint8, double>(dx, N, dz);
        } else {
            printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
        }
    } else if (srcType == ND4J_FLOAT16) {
        // float16 additionally supports the threshold codec as a destination.
        if (dstType == ND4J_FLOAT8) {
            convertGeneric<float16, nd4j::float8>(dx, N, dz);
        } else if (dstType == ND4J_INT8) {
            convertGeneric<float16, nd4j::int8>(dx, N, dz);
        } else if (dstType == ND4J_UINT8) {
            convertGeneric<float16, nd4j::uint8>(dx, N, dz);
        } else if (dstType == ND4J_FLOAT16) {
            convertGeneric<float16, float16>(dx, N, dz);
        } else if (dstType == ND4J_INT16) {
            convertGeneric<float16, nd4j::int16>(dx, N, dz);
        } else if (dstType == ND4J_UINT16) {
            convertGeneric<float16, nd4j::uint16>(dx, N, dz);
        } else if (dstType == ND4J_FLOAT24) {
        } else if (dstType == ND4J_FLOAT32) {
            convertGeneric<float16, float>(dx, N, dz);
        } else if (dstType == ND4J_DOUBLE) {
            convertGeneric<float16, double>(dx, N, dz);
        } else if (dstType == ND4J_THRESHOLD) {
            convertToThreshold<float16>(dx, N, dz);
        } else {
            printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
        }
    } else if (srcType == ND4J_INT16) {
        if (dstType == ND4J_FLOAT8) {
            convertGeneric<nd4j::int16, nd4j::float8>(dx, N, dz);
        } else if (dstType == ND4J_INT8) {
            convertGeneric<nd4j::int16, nd4j::int8>(dx, N, dz);
        } else if (dstType == ND4J_UINT8) {
            convertGeneric<nd4j::int16, nd4j::uint8>(dx, N, dz);
        } else if (dstType == ND4J_FLOAT16) {
            convertGeneric<nd4j::int16, float16>(dx, N, dz);
        } else if (dstType == ND4J_INT16) {
            convertGeneric<nd4j::int16, nd4j::int16>(dx, N, dz);
        } else if (dstType == ND4J_UINT16) {
            convertGeneric<nd4j::int16, nd4j::uint16>(dx, N, dz);
        } else if (dstType == ND4J_FLOAT24) {
        } else if (dstType == ND4J_FLOAT32) {
            convertGeneric<nd4j::int16, float>(dx, N, dz);
        } else if (dstType == ND4J_DOUBLE) {
            convertGeneric<nd4j::int16, double>(dx, N, dz);
        } else {
            printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
        }
    } else if (srcType == ND4J_FLOAT24) {
    } else if (srcType == ND4J_FLOAT32) {
        if (dstType == ND4J_FLOAT8) {
            convertGeneric<float, nd4j::float8>(dx, N, dz);
        } else if (dstType == ND4J_INT8) {
            convertGeneric<float, nd4j::int8>(dx, N, dz);
        } else if (dstType == ND4J_UINT8) {
            convertGeneric<float, nd4j::uint8>(dx, N, dz);
        } else if (dstType == ND4J_FLOAT16) {
            convertGeneric<float, float16>(dx, N, dz);
        } else if (dstType == ND4J_INT16) {
            convertGeneric<float, nd4j::int16>(dx, N, dz);
        } else if (dstType == ND4J_UINT16) {
            convertGeneric<float, nd4j::uint16>(dx, N, dz);
        } else if (dstType == ND4J_FLOAT24) {
        } else if (dstType == ND4J_DOUBLE) {
            convertGeneric<float, double>(dx, N, dz);
        } else if (dstType == ND4J_THRESHOLD) {
            convertToThreshold<float>(dx, N, dz);
        } else {
            // NOTE(review): dstType == ND4J_FLOAT32 lands here and is reported
            // as unsupported, unlike the double->double silent no-op below.
            printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
        }
    } else if (srcType == ND4J_DOUBLE) {
        if (dstType == ND4J_FLOAT8) {
            convertGeneric<double, nd4j::float8>(dx, N, dz);
        } else if (dstType == ND4J_INT8) {
            convertGeneric<double, nd4j::int8>(dx, N, dz);
        } else if (dstType == ND4J_UINT8) {
            convertGeneric<double, nd4j::uint8>(dx, N, dz);
        } else if (dstType == ND4J_FLOAT16) {
            convertGeneric<double, float16>(dx, N, dz);
        } else if (dstType == ND4J_INT16) {
            convertGeneric<double, nd4j::int16>(dx, N, dz);
        } else if (dstType == ND4J_UINT16) {
            convertGeneric<double, nd4j::uint16>(dx, N, dz);
        } else if (dstType == ND4J_FLOAT24) {
        } else if (dstType == ND4J_FLOAT32) {
            convertGeneric<double, float>(dx, N, dz);
        } else if (dstType == ND4J_DOUBLE) {
            //
        } else if (dstType == ND4J_THRESHOLD) {
            convertToThreshold<double>(dx, N, dz);
        } else {
            printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
        }
    } else if (srcType == ND4J_THRESHOLD) {
        // Decoding a threshold-compressed stream: float targets only.
        if (dstType == ND4J_FLOAT16) {
            convertFromThreshold<float16>(dx, N, dz);
        } else if (dstType == ND4J_FLOAT32) {
            convertFromThreshold<float>(dx, N, dz);
        } else if (dstType == ND4J_DOUBLE) {
            convertFromThreshold<double>(dx, N, dz);
        } else {
            printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
        }
    } else {
        printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
    }
}
#endif //LIBND4J_TYPE_CONVERSIONS_H
|
increase.c | // RUN: %libomp-compile && env OMP_DISPLAY_AFFINITY=true %libomp-run | %python %S/check.py -c 'CHECK' %s
// REQUIRES: !abt
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main(int argc, char** argv) {
    // Exercise OMP_DISPLAY_AFFINITY: the runtime re-displays affinity only
    // when the team grows beyond any size seen so far. Expected output per
    // step (checked by the CHECK lines below):
    //   4 -> prints all (first parallel)
    //   8 -> prints all (new threads)
    //   6 -> silent (no new threads)
    //   9 -> prints all (one new thread)
    //   2 -> silent
    static const int counts[] = {4, 8, 6, 9, 2};
    int step;
    omp_set_affinity_format("TESTER: tl:%L tn:%n nt:%N");
    for (step = 0; step < 5; step++) {
        omp_set_num_threads(counts[step]);
#pragma omp parallel
        { }
    }
    return 0;
}
// CHECK: num_threads=4 TESTER: tl:1 tn:[0-3] nt:4
// CHECK: num_threads=8 TESTER: tl:1 tn:[0-7] nt:8
// CHECK: num_threads=6 TESTER: tl:1 tn:[0-5] nt:6
// CHECK: num_threads=9 TESTER: tl:1 tn:[0-8] nt:9
// CHECK: num_threads=2 TESTER: tl:1 tn:[01] nt:2
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 * NOTE: *y is normalized (mutated) in the process.
 * Returns 1 if the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    int carry;
    /* Borrow whole seconds into y's microseconds when x has fewer usecs. */
    if (x->tv_usec < y->tv_usec)
    {
        carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_sec += carry;
        y->tv_usec -= carry * 1000000;
    }
    /* Push any excess microseconds in the gap back into seconds. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_sec -= carry;
        y->tv_usec += carry * 1000000;
    }
    /* After normalization tv_usec of the difference is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Benchmark driver for the order-4 (25-point) 3D wave-equation stencil:
 * allocates the two time planes A[0]/A[1] plus the roc2 coefficient grid,
 * runs TESTS timed sweeps of the stencil, reports the best time, and frees
 * everything.
 * Usage: ./a.out Nx Ny Nz [Nt]  (8 ghost cells are added per dimension). */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    int Nx, Ny, Nz, Nt;
    /* NOTE(review): Nx/Ny/Nz (and Nt) stay uninitialized when too few
     * arguments are given, and are then used below — UB for argc <= 3/4. */
    if (argc > 3) {
        Nx = atoi(argv[1])+8;
        Ny = atoi(argv[2])+8;
        Nz = atoi(argv[3])+8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);
    /* A[0]/A[1]: the two ping-pong time planes; roc2: per-cell coefficient. */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    double ***roc2 = (double ***) malloc(sizeof(double**));
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    /* NOTE(review): this overwrites the single-element roc2 allocation made
     * two lines above — that first malloc is leaked. */
    roc2 = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        roc2[i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
            roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }
    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 16;
    tile_size[1] = 16;
    tile_size[2] = 24;
    tile_size[3] = 2048;
    tile_size[4] = -1;
    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;
    const int BASE = 1024;
    // initialize variables with a fixed seed for reproducible runs
    //
    srand(42);
    /* NOTE(review): loops start at 1, so plane/row/column index 0 is never
     * initialized, yet the stencil below reads index i-4 == 0 when i == 4 —
     * reads of uninitialized memory. Presumably benign for timing purposes. */
    for (i = 1; i < Nz; i++) {
        for (j = 1; j < Ny; j++) {
            for (k = 1; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                roc2[i][j][k] = 2.0 * (rand() % BASE);
            }
        }
    }
#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif
    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif
    /* Order-4 central-difference weights (center + 4 rings per axis). */
    const double coef0 = -0.28472;
    const double coef1 = 0.16000;
    const double coef2 = -0.02000;
    const double coef3 = 0.00254;
    const double coef4 = -0.00018;
    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz-4; i++) {
                for (j = 4; j < Ny-4; j++) {
                    for (k = 4; k < Nx-4; k++) {
                        /* Leapfrog update: t%2 is the current plane,
                         * (t+1)%2 holds the previous plane and receives the
                         * new value in place. */
                        A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                            coef0* A[t%2][i ][j ][k ] +
                            coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] +
                                   A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] +
                                   A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) +
                            coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] +
                                   A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] +
                                   A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) +
                            coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] +
                                   A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] +
                                   A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) +
                            coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] +
                                   A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] +
                                   A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) );
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif
    // Free allocated arrays
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
            free(roc2[i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
        free(roc2[i]);
    }
    free(A[0]);
    free(A[1]);
    free(roc2);
    return 0;
}
|
utils.c | #include <stdio.h>
#include <stdarg.h>
#include <stdint.h>
#include <stddef.h>
#include <math.h>
#include <omp.h>
#include "utils.h"
#include "bench.h"
/* printf-style helper that wraps the formatted output in an ANSI color.
 * Selector c: 0=red 1=green 2=yellow 3=blue 4=magenta 5=cyan; anything
 * else resets (SGR 0). Writes to stdout and restores the default color. */
void
fun3d_printf(const uint32_t c, const char *format, ...)
{
    /* SGR foreground codes 31..36 line up with selectors 0..5. */
    const uint32_t val = (c <= 5) ? (31 + c) : 0;
    char color[20];
    sprintf(color, "\x1b[%dm", val);
    va_list arg;
    va_start(arg, format);
    fprintf(stdout, "%s", color);
    vfprintf(stdout, format, arg);
    fprintf(stdout, "\x1b[0m");
    va_end(arg);
}
/* Euclidean (2nd) norm of v[0..sz-1], with BLAS-kernel benchmarking.
 * Fix: the loop index was uint32_t while sz is size_t — for sz > UINT32_MAX
 * the index wrapped and the loop never terminated; it is now size_t. */
double
Compute2ndNorm(const size_t sz, const double *v)
{
    BENCH start_bench = rdbench();
    double norm = 0.f;
    size_t i;
#pragma omp parallel for reduction(+: norm)
    for(i = 0; i < sz; i++) norm += v[i] * v[i];
    fun3d_log(start_bench, KERNEL_BLAS);
    return(sqrt(norm));
}
/* In-place AXPY: y[i] += a * x[i] for i in [0, sz), benchmarked as a BLAS
 * kernel. Fix: loop index widened from uint32_t to size_t so sizes above
 * UINT32_MAX no longer cause index wraparound / an endless loop. */
void
ComputeAXPY(const size_t sz, const double a, const double *x, double *y)
{
    BENCH start_bench = rdbench();
    size_t i;
#pragma omp parallel for
    for(i = 0; i < sz; i++)
    {
        /* AXPY */
        const double ax = a * x[i];
        const double axpy = ax + y[i];
        /* Update the vector component */
        y[i] = axpy;
    }
    fun3d_log(start_bench, KERNEL_BLAS);
}
/* Out-of-place AXPY: w[i] = a * x[i] + y[i] for i in [0, sz), benchmarked
 * as a BLAS kernel. Fix: loop index widened from uint32_t to size_t so
 * sizes above UINT32_MAX no longer wrap the index. */
void
ComputeNewAXPY(const size_t sz, const double a, const double *x, const double *y, double *w)
{
    BENCH start_bench = rdbench();
    size_t i;
#pragma omp parallel for
    for(i = 0; i < sz; i++)
    {
        /* AXPY */
        const double ax = a * x[i];
        const double axpy = ax + y[i];
        /* Update the vector component */
        w[i] = axpy;
    }
    fun3d_log(start_bench, KERNEL_BLAS);
}
/* Scale x to unit 2-norm in place and return the original norm.
 * NOTE(review): a zero vector gives norm == 0 and a division by zero in the
 * scale factor — callers presumably guarantee a nonzero input; confirm.
 * Fix: loop index widened from uint32_t to size_t (wraparound for huge sz). */
double
Normalize(const size_t sz, double *x)
{
    BENCH start_bench = rdbench();
    double norm = Compute2ndNorm(sz, x);
    size_t i;
#pragma omp parallel for
    for(i = 0; i < sz; i++) x[i] *= (1.f / norm);
    fun3d_log(start_bench, KERNEL_BLAS);
    return norm;
}
GB_unop__identity_fc64_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc64_bool)
// op(A') function: GB (_unop_tran__identity_fc64_bool)
// C type: GxB_FC64_t
// A type: bool
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply C = identity(A) element-wise, casting bool -> GxB_FC64_t.
// (Auto-generated file: code must stay in sync with the Generator template;
// only comments added here.) Cx and Ax may alias. Returns GrB_NO_VALUE when
// this operator/type combination is compiled out via GB_DISABLE.
GrB_Info GB (_unop_apply__identity_fc64_bool)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            bool aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries absent from the bitmap
            bool aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity(A'): transpose A, casting bool -> GxB_FC64_t along the way.
// The real work lives in the shared template GB_unop_transpose.c, which
// expands using the GB_* macros defined above. (Auto-generated file.)
GrB_Info GB (_unop_tran__identity_fc64_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unaryop__ainv_int8_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int8_int8
// op(A') function: GB_tran__ainv_int8_int8
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
int8_t z = (int8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = -(Ax): apply the additive-inverse unary operator over int8 arrays.
// (Auto-generated file: keep in sync with the Generator template; only
// comments added.) Cx and Ax may alias; work is split statically across
// nthreads. Returns GrB_NO_VALUE if compiled out via GB_DISABLE.
GrB_Info GB_unop__ainv_int8_int8
(
    int8_t *Cx,         // Cx and Ax may be aliased
    int8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Cx [p] = -((int8_t) Ax [p]), via the GB_* macro pipeline
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = -(A'): transpose A and apply the int8 additive-inverse operator.
// Implementation comes from the shared template GB_unaryop_transpose.c
// (phase 2), expanded with the GB_* macros above. (Auto-generated file.)
GrB_Info GB_tran__ainv_int8_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
linked_omp25.c | #include <stdlib.h>
#include <stdio.h>
#include "omp.h"
#define N 5
#define FS 38
#define NMAX 10
struct node {
int data;
int fibdata;
struct node* next;
};
/* Naive doubly-recursive Fibonacci — deliberately expensive, so that each
 * list node gives the parallel loop a real chunk of work. fib(0)=0, fib(1)=1. */
int fib(int n) {
    int a, b;
    if (n < 2)
        return n;
    a = fib(n - 1);
    b = fib(n - 2);
    return a + b;
}
/* Compute and store the Fibonacci number for this node's data value. */
void processwork(struct node* p)
{
    p->fibdata = fib(p->data);
}
/* Build a fresh (N+1)-node list: head holds data=FS, followed by nodes with
 * data FS+1..FS+N. Returns the head. The incoming pointer is received by
 * value and only used as scratch by the original, so it is ignored here. */
struct node* init_list(struct node* p) {
    int i;
    struct node* head = malloc(sizeof(struct node));
    struct node* tail = head;
    tail->data = FS;
    tail->fibdata = 0;
    for (i = 0; i < N; i++) {
        tail->next = malloc(sizeof(struct node));
        tail = tail->next;
        tail->data = FS + i + 1;
        tail->fibdata = i + 1;
    }
    tail->next = NULL;
    return head;
}
/* Driver: build a linked list of work items, process it serially for a
 * baseline time, then again in parallel by first flattening the list into
 * an array of node pointers (lists can't be divided by omp for directly). */
int main(int argc, char *argv[]) {
    double start, end;
    struct node *p=NULL;
    struct node *temp=NULL;
    struct node *head=NULL;
    struct node *parr[NMAX];   /* NMAX=10 >= N+1 nodes, so no overflow */
    int i, count=0;
    printf("Process linked list\n");
    printf("  Each linked list node will be processed by function 'processwork()'\n");
    printf("  Each ll node will compute %d fibonacci numbers beginning with %d\n",N,FS);
    p = init_list(p);
    head = p;
    /* --- serial baseline --- */
    start = omp_get_wtime();
    {
        while (p != NULL) {
            processwork(p);
            p = p->next;
        }
    }
    end = omp_get_wtime();
    printf("serial Compute Time: %f seconds\n", end - start);
    p = head;
    /* --- parallel version --- */
    start = omp_get_wtime();
    {
        // count number of items in the list.  Strictly speaking this isn't
        // needed since we know there are N elements in the list.  But in
        // most cases you don't know this and need to count nodes.
        while (p != NULL) {
            p = p->next;
            count++;
        }
        // traverse the list and collect pointers into an array.
        p = head;
        for(i=0; i<count; i++) {
            parr[i] = p;
            p = p->next;
        }
        // do the work in parallel; round-robin so expensive later nodes
        // are spread across threads
        #pragma omp parallel
        {
            #pragma omp single
            printf(" %d threads \n",omp_get_num_threads());
            #pragma omp for schedule(static,1)
            for(i=0; i<count; i++)
                processwork(parr[i]);
        }
    }
    end = omp_get_wtime();
    /* Print results and free the list. */
    p = head;
    while (p != NULL) {
        printf("%d : %d\n",p->data, p->fibdata);
        temp = p->next;
        free (p);
        p = temp;
    }
    /* NOTE(review): p is NULL here, so this free is a no-op (redundant). */
    free (p);
    printf("Compute Time: %f seconds\n", end - start);
    return 0;
}
|
omp.h | /*
* include/omp.h.var
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef __OMP_H
# define __OMP_H
# include <stdlib.h>
# include <stdint.h>
# define KMP_VERSION_MAJOR 5
# define KMP_VERSION_MINOR 0
# define KMP_VERSION_BUILD 20140926
# define KMP_BUILD_DATE "No_Timestamp"
# ifdef __cplusplus
extern "C" {
# endif
# define omp_set_affinity_format ompc_set_affinity_format
# define omp_get_affinity_format ompc_get_affinity_format
# define omp_display_affinity ompc_display_affinity
# define omp_capture_affinity ompc_capture_affinity
# if defined(_WIN32)
# define __KAI_KMPC_CONVENTION __cdecl
# ifndef __KMP_IMP
# define __KMP_IMP __declspec(dllimport)
# endif
# else
# define __KAI_KMPC_CONVENTION
# ifndef __KMP_IMP
# define __KMP_IMP
# endif
# endif
/* schedule kind constants */
typedef enum omp_sched_t {
omp_sched_static = 1,
omp_sched_dynamic = 2,
omp_sched_guided = 3,
omp_sched_auto = 4,
omp_sched_monotonic = 0x80000000
} omp_sched_t;
/* set API functions */
extern void __KAI_KMPC_CONVENTION omp_set_num_threads (int);
extern void __KAI_KMPC_CONVENTION omp_set_dynamic (int);
extern void __KAI_KMPC_CONVENTION omp_set_nested (int);
extern void __KAI_KMPC_CONVENTION omp_set_max_active_levels (int);
extern void __KAI_KMPC_CONVENTION omp_set_schedule (omp_sched_t, int);
/* query API functions */
extern int __KAI_KMPC_CONVENTION omp_get_num_threads (void);
extern int __KAI_KMPC_CONVENTION omp_get_dynamic (void);
extern int __KAI_KMPC_CONVENTION omp_get_nested (void);
extern int __KAI_KMPC_CONVENTION omp_get_max_threads (void);
extern int __KAI_KMPC_CONVENTION omp_get_thread_num (void);
extern int __KAI_KMPC_CONVENTION omp_get_num_procs (void);
extern int __KAI_KMPC_CONVENTION omp_in_parallel (void);
extern int __KAI_KMPC_CONVENTION omp_in_final (void);
extern int __KAI_KMPC_CONVENTION omp_get_active_level (void);
extern int __KAI_KMPC_CONVENTION omp_get_level (void);
extern int __KAI_KMPC_CONVENTION omp_get_ancestor_thread_num (int);
extern int __KAI_KMPC_CONVENTION omp_get_team_size (int);
extern int __KAI_KMPC_CONVENTION omp_get_thread_limit (void);
extern int __KAI_KMPC_CONVENTION omp_get_max_active_levels (void);
extern void __KAI_KMPC_CONVENTION omp_get_schedule (omp_sched_t *, int *);
extern int __KAI_KMPC_CONVENTION omp_get_max_task_priority (void);
/* lock API functions */
typedef struct omp_lock_t {
void * _lk;
} omp_lock_t;
extern void __KAI_KMPC_CONVENTION omp_init_lock (omp_lock_t *);
extern void __KAI_KMPC_CONVENTION omp_set_lock (omp_lock_t *);
extern void __KAI_KMPC_CONVENTION omp_unset_lock (omp_lock_t *);
extern void __KAI_KMPC_CONVENTION omp_destroy_lock (omp_lock_t *);
extern int __KAI_KMPC_CONVENTION omp_test_lock (omp_lock_t *);
/* nested lock API functions */
typedef struct omp_nest_lock_t {
void * _lk;
} omp_nest_lock_t;
extern void __KAI_KMPC_CONVENTION omp_init_nest_lock (omp_nest_lock_t *);
extern void __KAI_KMPC_CONVENTION omp_set_nest_lock (omp_nest_lock_t *);
extern void __KAI_KMPC_CONVENTION omp_unset_nest_lock (omp_nest_lock_t *);
extern void __KAI_KMPC_CONVENTION omp_destroy_nest_lock (omp_nest_lock_t *);
extern int __KAI_KMPC_CONVENTION omp_test_nest_lock (omp_nest_lock_t *);
/* OpenMP 5.0 Synchronization hints*/
typedef enum omp_sync_hint_t {
omp_sync_hint_none = 0,
omp_lock_hint_none = omp_sync_hint_none,
omp_sync_hint_uncontended = 1,
omp_lock_hint_uncontended = omp_sync_hint_uncontended,
omp_sync_hint_contended = (1<<1),
omp_lock_hint_contended = omp_sync_hint_contended,
omp_sync_hint_nonspeculative = (1<<2),
omp_lock_hint_nonspeculative = omp_sync_hint_nonspeculative,
omp_sync_hint_speculative = (1<<3),
omp_lock_hint_speculative = omp_sync_hint_speculative,
kmp_lock_hint_hle = (1<<16),
kmp_lock_hint_rtm = (1<<17),
kmp_lock_hint_adaptive = (1<<18)
} omp_sync_hint_t;
/* lock hint type for dynamic user lock */
typedef omp_sync_hint_t omp_lock_hint_t;
/* hinted lock initializers */
extern void __KAI_KMPC_CONVENTION omp_init_lock_with_hint(omp_lock_t *, omp_lock_hint_t);
extern void __KAI_KMPC_CONVENTION omp_init_nest_lock_with_hint(omp_nest_lock_t *, omp_lock_hint_t);
/* time API functions */
extern double __KAI_KMPC_CONVENTION omp_get_wtime (void);
extern double __KAI_KMPC_CONVENTION omp_get_wtick (void);
/* OpenMP 4.0 */
extern int __KAI_KMPC_CONVENTION omp_get_default_device (void);
extern void __KAI_KMPC_CONVENTION omp_set_default_device (int);
extern int __KAI_KMPC_CONVENTION omp_is_initial_device (void);
extern int __KAI_KMPC_CONVENTION omp_get_num_devices (void);
extern int __KAI_KMPC_CONVENTION omp_get_num_teams (void);
extern int __KAI_KMPC_CONVENTION omp_get_team_num (void);
extern int __KAI_KMPC_CONVENTION omp_get_cancellation (void);
/* OpenMP 4.5 */
extern int __KAI_KMPC_CONVENTION omp_get_initial_device (void);
extern void* __KAI_KMPC_CONVENTION omp_target_alloc(size_t, int);
extern void __KAI_KMPC_CONVENTION omp_target_free(void *, int);
extern int __KAI_KMPC_CONVENTION omp_target_is_present(const void *, int);
extern int __KAI_KMPC_CONVENTION omp_target_memcpy(void *, const void *, size_t, size_t, size_t, int, int);
extern int __KAI_KMPC_CONVENTION omp_target_memcpy_rect(void *, const void *, size_t, int, const size_t *,
const size_t *, const size_t *, const size_t *, const size_t *, int, int);
extern int __KAI_KMPC_CONVENTION omp_target_associate_ptr(const void *, const void *, size_t, size_t, int);
extern int __KAI_KMPC_CONVENTION omp_target_disassociate_ptr(const void *, int);
/* OpenMP 5.0 */
extern int __KAI_KMPC_CONVENTION omp_get_device_num (void);
typedef void * omp_depend_t;
/* OpenMP 5.1 interop */
typedef intptr_t omp_intptr_t;
/* 0..omp_get_num_interop_properties()-1 are reserved for implementation-defined properties */
typedef enum omp_interop_property {
omp_ipr_fr_id = -1,
omp_ipr_fr_name = -2,
omp_ipr_vendor = -3,
omp_ipr_vendor_name = -4,
omp_ipr_device_num = -5,
omp_ipr_platform = -6,
omp_ipr_device = -7,
omp_ipr_device_context = -8,
omp_ipr_targetsync = -9,
omp_ipr_first = -9
} omp_interop_property_t;
#define omp_interop_none 0
typedef enum omp_interop_rc {
omp_irc_no_value = 1,
omp_irc_success = 0,
omp_irc_empty = -1,
omp_irc_out_of_range = -2,
omp_irc_type_int = -3,
omp_irc_type_ptr = -4,
omp_irc_type_str = -5,
omp_irc_other = -6
} omp_interop_rc_t;
typedef enum omp_interop_fr {
omp_ifr_cuda = 1,
omp_ifr_cuda_driver = 2,
omp_ifr_opencl = 3,
omp_ifr_sycl = 4,
omp_ifr_hip = 5,
omp_ifr_level_zero = 6,
omp_ifr_last = 7
} omp_interop_fr_t;
typedef void * omp_interop_t;
/*!
* The `omp_get_num_interop_properties` routine retrieves the number of implementation-defined properties available for an `omp_interop_t` object.
*/
extern int __KAI_KMPC_CONVENTION omp_get_num_interop_properties(const omp_interop_t);
/*!
* The `omp_get_interop_int` routine retrieves an integer property from an `omp_interop_t` object.
*/
extern omp_intptr_t __KAI_KMPC_CONVENTION omp_get_interop_int(const omp_interop_t, omp_interop_property_t, int *);
/*!
* The `omp_get_interop_ptr` routine retrieves a pointer property from an `omp_interop_t` object.
*/
extern void * __KAI_KMPC_CONVENTION omp_get_interop_ptr(const omp_interop_t, omp_interop_property_t, int *);
/*!
* The `omp_get_interop_str` routine retrieves a string property from an `omp_interop_t` object.
*/
extern const char * __KAI_KMPC_CONVENTION omp_get_interop_str(const omp_interop_t, omp_interop_property_t, int *);
/*!
* The `omp_get_interop_name` routine retrieves a property name from an `omp_interop_t` object.
*/
extern const char * __KAI_KMPC_CONVENTION omp_get_interop_name(const omp_interop_t, omp_interop_property_t);
/*!
* The `omp_get_interop_type_desc` routine retrieves a description of the type of a property associated with an `omp_interop_t` object.
*/
extern const char * __KAI_KMPC_CONVENTION omp_get_interop_type_desc(const omp_interop_t, omp_interop_property_t);
/*!
* The `omp_get_interop_rc_desc` routine retrieves a description of the return code associated with an `omp_interop_t` object.
*/
extern const char * __KAI_KMPC_CONVENTION omp_get_interop_rc_desc(const omp_interop_t, omp_interop_rc_t);
/* OpenMP 5.1 device memory routines */
/*!
* The `omp_target_memcpy_async` routine asynchronously performs a copy between any combination of host and device pointers.
*/
extern int __KAI_KMPC_CONVENTION omp_target_memcpy_async(void *, const void *, size_t, size_t, size_t, int,
int, int, omp_depend_t *);
/*!
* The `omp_target_memcpy_rect_async` routine asynchronously performs a copy between any combination of host and device pointers.
*/
extern int __KAI_KMPC_CONVENTION omp_target_memcpy_rect_async(void *, const void *, size_t, int, const size_t *,
const size_t *, const size_t *, const size_t *, const size_t *, int, int,
int, omp_depend_t *);
/*!
* The `omp_get_mapped_ptr` routine returns the device pointer that is associated with a host pointer for a given device.
*/
extern void * __KAI_KMPC_CONVENTION omp_get_mapped_ptr(const void *, int);
extern int __KAI_KMPC_CONVENTION omp_target_is_accessible(const void *, size_t, int);
/* kmp API functions */
extern int __KAI_KMPC_CONVENTION kmp_get_stacksize (void);
extern void __KAI_KMPC_CONVENTION kmp_set_stacksize (int);
extern size_t __KAI_KMPC_CONVENTION kmp_get_stacksize_s (void);
extern void __KAI_KMPC_CONVENTION kmp_set_stacksize_s (size_t);
extern int __KAI_KMPC_CONVENTION kmp_get_blocktime (void);
extern int __KAI_KMPC_CONVENTION kmp_get_library (void);
extern void __KAI_KMPC_CONVENTION kmp_set_blocktime (int);
extern void __KAI_KMPC_CONVENTION kmp_set_library (int);
extern void __KAI_KMPC_CONVENTION kmp_set_library_serial (void);
extern void __KAI_KMPC_CONVENTION kmp_set_library_turnaround (void);
extern void __KAI_KMPC_CONVENTION kmp_set_library_throughput (void);
extern void __KAI_KMPC_CONVENTION kmp_set_defaults (char const *);
extern void __KAI_KMPC_CONVENTION kmp_set_disp_num_buffers (int);
/* Intel affinity API */
typedef void * kmp_affinity_mask_t;
extern int __KAI_KMPC_CONVENTION kmp_set_affinity (kmp_affinity_mask_t *);
extern int __KAI_KMPC_CONVENTION kmp_get_affinity (kmp_affinity_mask_t *);
extern int __KAI_KMPC_CONVENTION kmp_get_affinity_max_proc (void);
extern void __KAI_KMPC_CONVENTION kmp_create_affinity_mask (kmp_affinity_mask_t *);
extern void __KAI_KMPC_CONVENTION kmp_destroy_affinity_mask (kmp_affinity_mask_t *);
extern int __KAI_KMPC_CONVENTION kmp_set_affinity_mask_proc (int, kmp_affinity_mask_t *);
extern int __KAI_KMPC_CONVENTION kmp_unset_affinity_mask_proc (int, kmp_affinity_mask_t *);
extern int __KAI_KMPC_CONVENTION kmp_get_affinity_mask_proc (int, kmp_affinity_mask_t *);
/* OpenMP 4.0 affinity API */
/* Thread-affinity policies reported by omp_get_proc_bind() (OpenMP 4.0). */
typedef enum omp_proc_bind_t {
omp_proc_bind_false = 0,  /* no binding requested */
omp_proc_bind_true = 1,   /* bind threads; placement left to the implementation */
omp_proc_bind_master = 2, /* bind to the master thread's place */
omp_proc_bind_close = 3,  /* place threads close to the parent thread's place */
omp_proc_bind_spread = 4  /* spread threads across the place list */
} omp_proc_bind_t;
extern omp_proc_bind_t __KAI_KMPC_CONVENTION omp_get_proc_bind (void);
/* OpenMP 4.5 affinity API */
extern int __KAI_KMPC_CONVENTION omp_get_num_places (void);
extern int __KAI_KMPC_CONVENTION omp_get_place_num_procs (int);
extern void __KAI_KMPC_CONVENTION omp_get_place_proc_ids (int, int *);
extern int __KAI_KMPC_CONVENTION omp_get_place_num (void);
extern int __KAI_KMPC_CONVENTION omp_get_partition_num_places (void);
extern void __KAI_KMPC_CONVENTION omp_get_partition_place_nums (int *);
extern void * __KAI_KMPC_CONVENTION kmp_malloc (size_t);
extern void * __KAI_KMPC_CONVENTION kmp_aligned_malloc (size_t, size_t);
extern void * __KAI_KMPC_CONVENTION kmp_calloc (size_t, size_t);
extern void * __KAI_KMPC_CONVENTION kmp_realloc (void *, size_t);
extern void __KAI_KMPC_CONVENTION kmp_free (void *);
extern void __KAI_KMPC_CONVENTION kmp_set_warnings_on(void);
extern void __KAI_KMPC_CONVENTION kmp_set_warnings_off(void);
/* OpenMP 5.0 Tool Control */
/* Result codes returned by omp_control_tool() (OMPT tool control). */
typedef enum omp_control_tool_result_t {
omp_control_tool_notool = -2,     /* no tool is attached */
omp_control_tool_nocallback = -1, /* attached tool registered no handler */
omp_control_tool_success = 0,
omp_control_tool_ignored = 1
} omp_control_tool_result_t;
/* Commands accepted by omp_control_tool(). */
typedef enum omp_control_tool_t {
omp_control_tool_start = 1,
omp_control_tool_pause = 2,
omp_control_tool_flush = 3,
omp_control_tool_end = 4
} omp_control_tool_t;
extern int __KAI_KMPC_CONVENTION omp_control_tool(int, int, void*);
/* OpenMP 5.0 Memory Management */
typedef uintptr_t omp_uintptr_t;
/* Keys of allocator traits (OpenMP 5.0 memory management). */
typedef enum {
omp_atk_sync_hint = 1,
omp_atk_alignment = 2,
omp_atk_access = 3,
omp_atk_pool_size = 4,
omp_atk_fallback = 5,
omp_atk_fb_data = 6,
omp_atk_pinned = 7,
omp_atk_partition = 8
} omp_alloctrait_key_t;
/* Values an allocator trait may take; which are valid depends on the key. */
typedef enum {
omp_atv_false = 0,
omp_atv_true = 1,
omp_atv_contended = 3,
omp_atv_uncontended = 4,
omp_atv_serialized = 5,
omp_atv_sequential = omp_atv_serialized, // (deprecated)
omp_atv_private = 6,
omp_atv_all = 7,
omp_atv_thread = 8,
omp_atv_pteam = 9,
omp_atv_cgroup = 10,
omp_atv_default_mem_fb = 11,
omp_atv_null_fb = 12,
omp_atv_abort_fb = 13,
omp_atv_allocator_fb = 14,
omp_atv_environment = 15,
omp_atv_nearest = 16,
omp_atv_blocked = 17,
omp_atv_interleaved = 18
} omp_alloctrait_value_t;
/* "default" trait value: all bits set so it cannot collide with real values. */
#define omp_atv_default ((omp_uintptr_t)-1)
/* One (key, value) pair; an array of these is passed to omp_init_allocator(). */
typedef struct {
omp_alloctrait_key_t key;
omp_uintptr_t value;
} omp_alloctrait_t;
# if defined(_WIN32)
// On Windows cl and icl do not support 64-bit enum, let's use integer then.
typedef omp_uintptr_t omp_allocator_handle_t;
extern __KMP_IMP omp_allocator_handle_t const omp_null_allocator;
extern __KMP_IMP omp_allocator_handle_t const omp_default_mem_alloc;
extern __KMP_IMP omp_allocator_handle_t const omp_large_cap_mem_alloc;
extern __KMP_IMP omp_allocator_handle_t const omp_const_mem_alloc;
extern __KMP_IMP omp_allocator_handle_t const omp_high_bw_mem_alloc;
extern __KMP_IMP omp_allocator_handle_t const omp_low_lat_mem_alloc;
extern __KMP_IMP omp_allocator_handle_t const omp_cgroup_mem_alloc;
extern __KMP_IMP omp_allocator_handle_t const omp_pteam_mem_alloc;
extern __KMP_IMP omp_allocator_handle_t const omp_thread_mem_alloc;
/* Preview of target memory support */
extern __KMP_IMP omp_allocator_handle_t const llvm_omp_target_host_mem_alloc;
extern __KMP_IMP omp_allocator_handle_t const llvm_omp_target_shared_mem_alloc;
extern __KMP_IMP omp_allocator_handle_t const llvm_omp_target_device_mem_alloc;
typedef omp_uintptr_t omp_memspace_handle_t;
extern __KMP_IMP omp_memspace_handle_t const omp_default_mem_space;
extern __KMP_IMP omp_memspace_handle_t const omp_large_cap_mem_space;
extern __KMP_IMP omp_memspace_handle_t const omp_const_mem_space;
extern __KMP_IMP omp_memspace_handle_t const omp_high_bw_mem_space;
extern __KMP_IMP omp_memspace_handle_t const omp_low_lat_mem_space;
/* Preview of target memory support */
extern __KMP_IMP omp_memspace_handle_t const llvm_omp_target_host_mem_space;
extern __KMP_IMP omp_memspace_handle_t const llvm_omp_target_shared_mem_space;
extern __KMP_IMP omp_memspace_handle_t const llvm_omp_target_device_mem_space;
# else
# if __cplusplus >= 201103
typedef enum omp_allocator_handle_t : omp_uintptr_t
# else
typedef enum omp_allocator_handle_t
# endif
{
omp_null_allocator = 0,
omp_default_mem_alloc = 1,
omp_large_cap_mem_alloc = 2,
omp_const_mem_alloc = 3,
omp_high_bw_mem_alloc = 4,
omp_low_lat_mem_alloc = 5,
omp_cgroup_mem_alloc = 6,
omp_pteam_mem_alloc = 7,
omp_thread_mem_alloc = 8,
/* Preview of target memory support */
llvm_omp_target_host_mem_alloc = 100,
llvm_omp_target_shared_mem_alloc = 101,
llvm_omp_target_device_mem_alloc = 102,
KMP_ALLOCATOR_MAX_HANDLE = UINTPTR_MAX
} omp_allocator_handle_t;
# if __cplusplus >= 201103
typedef enum omp_memspace_handle_t : omp_uintptr_t
# else
typedef enum omp_memspace_handle_t
# endif
{
omp_default_mem_space = 0,
omp_large_cap_mem_space = 1,
omp_const_mem_space = 2,
omp_high_bw_mem_space = 3,
omp_low_lat_mem_space = 4,
/* Preview of target memory support */
llvm_omp_target_host_mem_space = 100,
llvm_omp_target_shared_mem_space = 101,
llvm_omp_target_device_mem_space = 102,
KMP_MEMSPACE_MAX_HANDLE = UINTPTR_MAX
} omp_memspace_handle_t;
# endif
extern omp_allocator_handle_t __KAI_KMPC_CONVENTION omp_init_allocator(omp_memspace_handle_t m,
int ntraits, omp_alloctrait_t traits[]);
extern void __KAI_KMPC_CONVENTION omp_destroy_allocator(omp_allocator_handle_t allocator);
extern void __KAI_KMPC_CONVENTION omp_set_default_allocator(omp_allocator_handle_t a);
extern omp_allocator_handle_t __KAI_KMPC_CONVENTION omp_get_default_allocator(void);
# ifdef __cplusplus
extern void *__KAI_KMPC_CONVENTION omp_alloc(size_t size, omp_allocator_handle_t a = omp_null_allocator);
extern void *__KAI_KMPC_CONVENTION omp_calloc(size_t nmemb, size_t size, omp_allocator_handle_t a = omp_null_allocator);
extern void *__KAI_KMPC_CONVENTION omp_realloc(void *ptr, size_t size,
omp_allocator_handle_t allocator = omp_null_allocator,
omp_allocator_handle_t free_allocator = omp_null_allocator);
extern void __KAI_KMPC_CONVENTION omp_free(void * ptr, omp_allocator_handle_t a = omp_null_allocator);
# else
extern void *__KAI_KMPC_CONVENTION omp_alloc(size_t size, omp_allocator_handle_t a);
extern void *__KAI_KMPC_CONVENTION omp_calloc(size_t nmemb, size_t size, omp_allocator_handle_t a);
extern void *__KAI_KMPC_CONVENTION omp_realloc(void *ptr, size_t size, omp_allocator_handle_t allocator,
omp_allocator_handle_t free_allocator);
extern void __KAI_KMPC_CONVENTION omp_free(void *ptr, omp_allocator_handle_t a);
# endif
/* OpenMP 5.0 Affinity Format */
extern void __KAI_KMPC_CONVENTION omp_set_affinity_format(char const *);
extern size_t __KAI_KMPC_CONVENTION omp_get_affinity_format(char *, size_t);
extern void __KAI_KMPC_CONVENTION omp_display_affinity(char const *);
extern size_t __KAI_KMPC_CONVENTION omp_capture_affinity(char *, size_t, char const *);
/* OpenMP 5.0 events */
# if defined(_WIN32)
// On Windows cl and icl do not support 64-bit enum, let's use integer then.
typedef omp_uintptr_t omp_event_handle_t;
# else
typedef enum omp_event_handle_t { KMP_EVENT_MAX_HANDLE = UINTPTR_MAX } omp_event_handle_t;
# endif
extern void __KAI_KMPC_CONVENTION omp_fulfill_event ( omp_event_handle_t event );
/* OpenMP 5.0 Pause Resources */
/* Levels for omp_pause_resource() / omp_pause_resource_all() (OpenMP 5.0). */
typedef enum omp_pause_resource_t {
omp_pause_resume = 0, /* resume normal operation */
omp_pause_soft = 1,   /* release resources that are cheap to re-acquire */
omp_pause_hard = 2    /* release as much as possible; runtime re-initializes */
} omp_pause_resource_t;
extern int __KAI_KMPC_CONVENTION omp_pause_resource(omp_pause_resource_t, int);
extern int __KAI_KMPC_CONVENTION omp_pause_resource_all(omp_pause_resource_t);
extern int __KAI_KMPC_CONVENTION omp_get_supported_active_levels(void);
/* OpenMP 5.1 */
extern void __KAI_KMPC_CONVENTION omp_set_num_teams(int num_teams);
extern int __KAI_KMPC_CONVENTION omp_get_max_teams(void);
extern void __KAI_KMPC_CONVENTION omp_set_teams_thread_limit(int limit);
extern int __KAI_KMPC_CONVENTION omp_get_teams_thread_limit(void);
/* OpenMP 5.1 Display Environment */
extern void omp_display_env(int verbose);
/* With OpenMP >= 5.0 compilers, resolve omp_is_initial_device() at compile
 * time via `declare variant`: 1 when compiled for the host, 0 for devices. */
# if defined(_OPENMP) && _OPENMP >= 201811
#pragma omp begin declare variant match(device={kind(host)})
static inline int omp_is_initial_device(void) { return 1; }
#pragma omp end declare variant
#pragma omp begin declare variant match(device={kind(nohost)})
static inline int omp_is_initial_device(void) { return 0; }
#pragma omp end declare variant
# endif
# undef __KAI_KMPC_CONVENTION
# undef __KMP_IMP
/* Warning:
The following typedefs are not standard, deprecated and will be removed in a future release.
*/
typedef int omp_int_t;
typedef double omp_wtime_t;
# ifdef __cplusplus
}
# endif
#endif /* __OMP_H */
|
spectralnorm-4.c | /* The Computer Language Benchmarks Game
* http://benchmarksgame.alioth.debian.org/
*
* Original C contributed by Sebastien Loisel
* Conversion to C++ by Jon Harrop
* OpenMP parallelize by The Anh Tran
* Add SSE by The Anh Tran
* Reconversion into C by Dan Farina
*/
#define _GNU_SOURCE
#include <omp.h>
#include <math.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#define false 0
#define true 1
/* SIMD data type: two doubles packed into one 16-byte XMM register. */
typedef double v2dt __attribute__((vector_size(16)));
static const v2dt v1 = {1.0, 1.0};

/* Shared parameters for the evaluate functions. */
struct Param
{
    double* u;   /* source vector */
    double* tmp; /* temporary */
    double* v;   /* destination vector */
    int N;       /* source/destination vector length */
    int N2;      /* = N/2 */
    int r_begin; /* working range of each thread */
    int r_end;
};

/*
 * A[i][j] = 1.0 / ((i+j)*(i+j+1)/2 + i + 1).
 * n*(n+1) is always even, so the halving is exact.
 */
static double
eval_A(int i, int j)
{
    int n = i + j;
    int denom = ((n * (n + 1)) >> 1) + i + 1;
    return 1.0 / denom;
}

/*
 * Pair {A[i][j], A[i+1][j]} in one XMM register:
 * lane0 = 1.0 / ((i+j)*(i+j+1)/2 + i + 1)
 * lane1 = 1.0 / ((i+1+j)*(i+j+2)/2 + i + 2)
 */
static v2dt
eval_A_i(int i, int j)
{
    int lo = (((i + j) * (i + j + 1)) >> 1) + i + 1;
    int hi = (((i + 1 + j) * (i + j + 2)) >> 1) + i + 2;
    v2dt denom = {lo, hi};
    return v1 / denom;
}

/*
 * Pair {A[i][j], A[i][j+1]} in one XMM register:
 * lane0 = 1.0 / ((i+j)*(i+j+1)/2 + i + 1)
 * lane1 = 1.0 / ((i+j+1)*(i+j+2)/2 + i + 1)
 */
static v2dt
eval_A_j(int i, int j)
{
    int lo = (((i + j) * (i + j + 1)) >> 1) + i + 1;
    int hi = (((i + j + 1) * (i + j + 2)) >> 1) + i + 1;
    v2dt denom = {lo, hi};
    return v1 / denom;
}
/*
 * tmp = A * u for rows [r_begin, r_end).  Called concurrently by many
 * threads; each thread owns a disjoint row range so no locking is needed.
 *
 * Fix: the horizontal-add cast had been corrupted to the HTML entity
 * "&sum;" (mojibake); restored to taking the address of `sum`.
 */
static void
eval_A_times_u(struct Param *p)
{
    /* View the source vector as packed pairs of doubles. */
    const v2dt *pU = (void *) p->u;
    int i;
    int ie;

    for (i = p->r_begin, ie = p->r_end; i < ie; i++)
    {
        v2dt sum = {0, 0};
        /* Two columns per iteration; j counts vector pairs in [0 .. N/2). */
        int j;
        for (j = 0; j < p->N2; j++)
            sum += pU[j] * eval_A_j(i, j*2);
        /* Horizontal add of the two lanes. */
        {
            double *mem = (void *) &sum;
            p->tmp[i] = mem[0] + mem[1];
        }
        /* Odd-sized vectors leave one scalar column; runs at most once. */
        for (j = j*2; __builtin_expect(j < p->N, false); j++)
            p->tmp[i] += eval_A(i, j) * p->u[j];
    }
}
/*
 * v = A^T * tmp for rows [r_begin, r_end).  Mirror of eval_A_times_u with
 * the matrix indices transposed (eval_A_i / eval_A(j, i)).
 *
 * Fix: the horizontal-add cast had been corrupted to the HTML entity
 * "&sum;" (mojibake); restored to taking the address of `sum`.
 */
static void
eval_At_times_u(struct Param *p)
{
    const v2dt *pT = (void *) p->tmp;
    int i;
    int ie;

    for (i = p->r_begin, ie = p->r_end; i < ie; i++)
    {
        v2dt sum = {0, 0};
        int j;
        for (j = 0; j < p->N2; j++)
            sum += pT[j] * eval_A_i(j*2, i);
        /* Horizontal add of the two lanes. */
        {
            double *mem = (void *) &sum;
            p->v[i] = mem[0] + mem[1];
        }
        /* Odd-sized vectors leave one scalar row; runs at most once. */
        for (j = j*2; __builtin_expect(j < p->N, false); j++)
            p->v[i] += eval_A(j, i) * p->tmp[j];
    }
}
/*
 * One power-method step: v = A^T (A u).
 *
 * Called by every thread in the parallel region.  Each thread writes only
 * its own slice, but the second half-product reads ALL of tmp, so barriers
 * must separate (and follow) the two half-products.
 */
static void
eval_AtA_times_u(struct Param *p)
{
eval_A_times_u(p);   /* tmp = A u */
#pragma omp barrier  /* tmp must be fully written before A^T reads it */
eval_At_times_u(p);  /* v = A^T tmp */
#pragma omp barrier  /* v complete before the caller's next step reuses it */
}
/*
* Shootout bench uses affinity to emulate single core processor. This
* function searches for appropriate number of threads to spawn.
*/
static int
GetThreadCount()
{
cpu_set_t cs;
int i;
int count = 0;
CPU_ZERO(&cs);
sched_getaffinity(0, sizeof(cs), &cs);
for (i = 0; i < 16; i++)
if (CPU_ISSET(i, &cs))
count++;
return count;
}
/*
 * Power-method approximation of the spectral norm of A: starting from
 * u = (1,...,1), apply v = (A^T A) u ten times (ping-ponging the u/v
 * buffers), then return sqrt((u.v)/(v.v)).  A single parallel region is
 * used; each thread statically owns a contiguous slice of rows.
 */
static double
spectral_game(int N)
{
/* Align 64 byte for L2 cache line */
__attribute__((aligned(64))) double u[N];
__attribute__((aligned(64))) double tmp[N];
__attribute__((aligned(64))) double v[N];
double vBv = 0.0;
double vv = 0.0;
#pragma omp parallel default(shared) num_threads(GetThreadCount())
{
int i;
#pragma omp for schedule(static)
for (i = 0; i < N; i++)
u[i] = 1.0;
/*
 * Variables declared inside the parallel region are private to each
 * thread; each thread computes its own static row slice below.
 */
int threadid = omp_get_thread_num();
int threadcount = omp_get_num_threads();
int chunk = N / threadcount;
int ite;
struct Param my_param;
my_param.tmp = tmp;
my_param.N = N;
my_param.N2 = N/2;
/*
 * calculate each thread's working range [range1 .. range2) => static
 * schedule here; the last thread absorbs the remainder rows.
 */
my_param.r_begin = threadid * chunk;
my_param.r_end = (threadid < (threadcount -1)) ?
(my_param.r_begin + chunk) : N;
/* Ten double-steps of v = AtA u, swapping source/destination each time. */
for (ite = 0; ite < 10; ite++)
{
my_param.u = u; /* source vec is u */
my_param.v = v; /* destination vec is v */
eval_AtA_times_u(&my_param);
my_param.u = v; /* source is v */
my_param.v = u; /* destination is u */
eval_AtA_times_u(&my_param);
}
/* multi thread adding: accumulate the two dot products via reduction */
{
int i;
#pragma omp for schedule(static) reduction( + : vBv, vv ) nowait
for (i = 0; i < N; i++)
{
vv += v[i] * v[i];
vBv += u[i] * v[i];
}
}
}
/* end parallel region */
return sqrt(vBv/vv);
}
/* Entry point: problem size comes from argv[1], defaulting to 2000. */
int
main(int argc, char *argv[])
{
    int N = 2000;
    if (argc >= 2)
        N = atoi(argv[1]);
    printf("%.9f\n", spectral_game(N));
    return 0;
}
|
threading_utils.h | /*!
* Copyright 2015-2019 by Contributors
* \file common.h
* \brief Threading utilities
*/
#ifndef XGBOOST_COMMON_THREADING_UTILS_H_
#define XGBOOST_COMMON_THREADING_UTILS_H_
#include <dmlc/common.h>
#include <vector>
#include <algorithm>
#include "xgboost/logging.h"
namespace xgboost {
namespace common {
// Represent simple range of indexes [begin, end)
// Inspired by tbb::blocked_range
// Half-open index interval [begin, end); a 1-D analogue of tbb::blocked_range.
class Range1d {
 public:
  // Requires begin < end (checked at construction).
  Range1d(size_t begin, size_t end): left_(begin), right_(end) {
    CHECK_LT(begin, end);
  }
  size_t begin() const { return left_; }  // NOLINT
  size_t end() const { return right_; }  // NOLINT

 private:
  size_t left_;
  size_t right_;
};
// Split 2d space to balanced blocks
// Implementation of the class is inspired by tbb::blocked_range2d
// However, TBB provides only (n x m) 2d range (matrix) separated by blocks. Example:
// [ 1,2,3 ]
// [ 4,5,6 ]
// [ 7,8,9 ]
// But the class is able to work with different sizes in each 'row'. Example:
// [ 1,2 ]
// [ 3,4,5,6 ]
// [ 7,8,9]
// If grain_size is 2: It produces following blocks:
// [1,2], [3,4], [5,6], [7,8], [9]
// The class helps to process data in several tree nodes (non-balanced usually) in parallel
// Using nested parallelism (by nodes and by data in each node)
// it helps to improve CPU resources utilization
// Partitions a ragged 2-D iteration space (rows of differing lengths) into
// blocks of at most grain_size consecutive second-dimension indices.  Each
// block remembers its row and its [begin, end) column range, so blocks of
// several (typically unbalanced) tree nodes can be processed in parallel.
//
// Example with rows [1,2], [3,4,5,6], [7,8,9] and grain_size = 2, the
// produced blocks are [1,2], [3,4], [5,6], [7,8], [9].
class BlockedSpace2d {
 public:
  // dim1             - number of rows in the space
  // getter_size_dim2 - functor: row index -> length of that row
  // grain_size       - maximum length of a produced block
  template<typename Func>
  BlockedSpace2d(size_t dim1, Func getter_size_dim2, size_t grain_size) {
    for (size_t row = 0; row < dim1; ++row) {
      const size_t len = getter_size_dim2(row);
      size_t start = 0;
      while (start < len) {
        const size_t stop = std::min(start + grain_size, len);
        AddBlock(row, start, stop);
        start = stop;
      }
    }
  }

  // Total number of blocks (tasks) in the space.
  size_t Size() const {
    return ranges_.size();
  }

  // Row index of the i-th block.
  size_t GetFirstDimension(size_t i) const {
    CHECK_LT(i, first_dimension_.size());
    return first_dimension_[i];
  }

  // Column range [begin, end) of the i-th block.
  Range1d GetRange(size_t i) const {
    CHECK_LT(i, ranges_.size());
    return ranges_[i];
  }

 private:
  // Record one block: its row and its column range.
  void AddBlock(size_t first_dimension, size_t begin, size_t end) {
    first_dimension_.push_back(first_dimension);
    ranges_.emplace_back(begin, end);
  }

  std::vector<Range1d> ranges_;
  std::vector<size_t> first_dimension_;
};
// Wrapper to implement nested parallelism with a single omp parallel region.
// The blocks of `space` are split into contiguous chunks, one chunk per
// thread; func(row, range) is invoked for every block in the thread's chunk.
// Exceptions thrown inside func are captured by dmlc::OMPException and
// rethrown on the calling thread once the region has ended.
template <typename Func>
void ParallelFor2d(const BlockedSpace2d& space, int nthreads, Func func) {
const size_t num_blocks_in_space = space.Size();
// Clamp the requested thread count to [1, omp_get_max_threads()].
nthreads = std::min(nthreads, omp_get_max_threads());
nthreads = std::max(nthreads, 1);
dmlc::OMPException omp_exc;
#pragma omp parallel num_threads(nthreads)
{
// The lambda takes its context as parameters (not captures) so that
// omp_exc.Run can forward them uniformly.
omp_exc.Run(
[](size_t num_blocks_in_space, const BlockedSpace2d& space, int nthreads, Func func) {
size_t tid = omp_get_thread_num();
// Ceiling division: each thread processes at most chunck_size blocks.
size_t chunck_size =
num_blocks_in_space / nthreads + !!(num_blocks_in_space % nthreads);
size_t begin = chunck_size * tid;
size_t end = std::min(begin + chunck_size, num_blocks_in_space);
for (auto i = begin; i < end; i++) {
func(space.GetFirstDimension(i), space.GetRange(i));
}
}, num_blocks_in_space, space, nthreads, func);
}
omp_exc.Rethrow();
}
// Simple parallel loop over [0, size): runs fn(i) on nthreads OpenMP threads.
// Exceptions thrown by fn are captured per iteration (dmlc::OMPException) and
// rethrown once on the calling thread after the loop completes.
template <typename Func>
void ParallelFor(size_t size, size_t nthreads, Func fn) {
dmlc::OMPException omp_exc;
#pragma omp parallel for num_threads(nthreads)
for (omp_ulong i = 0; i < size; ++i) {
omp_exc.Run(fn, i);
}
omp_exc.Rethrow();
}
/* \brief Configure parallel threads.
*
* \param p_threads Number of threads, when it's less than or equal to 0, this function
* will change it to number of process on system.
*
* \return Global openmp max threads before configuration.
*/
// Apply *p_threads as the global OpenMP thread count; a non-positive request
// is replaced (in place) by the processor count.  Returns the value of
// omp_get_max_threads() from before the change so callers can restore it.
inline int32_t OmpSetNumThreads(int32_t* p_threads) {
  const int32_t previous = omp_get_max_threads();
  if (*p_threads <= 0) {
    *p_threads = omp_get_num_procs();
  }
  omp_set_num_threads(*p_threads);
  return previous;
}
} // namespace common
} // namespace xgboost
#endif // XGBOOST_COMMON_THREADING_UTILS_H_
|
quadtree.h | #pragma once
#include "common.h"
#include <vector>
#include <queue>
#include <iostream>
#include <string>
#include <omp.h>
struct quadnode;
typedef quadnode* nodeptr;
/*
 * Barnes-Hut quadtree node.  A leaf stores at most one particle (`within`);
 * once a second particle arrives the cell is split into four children and
 * both particles are pushed down.  Every node keeps the running particle
 * count, total mass and (unweighted) center of the particles below it.
 *
 * Fixes over the original:
 *  - three occurrences of the mojibake HTML entity "&par;" restored to
 *    real "&par" address-of expressions (add, add_parallel, test);
 *  - all_parallel() had every thread insert into one shared std::vector
 *    concurrently (a data race); children are now collected into private
 *    vectors and merged serially.
 *
 * NOTE(review): children allocated with `new` are never freed here (no
 * destructor) -- confirm the tree's owner releases the nodes.
 */
struct quadnode {
    nodeptr child[4];
    /* Bounding box (xmin, xmax] x (ymin, ymax], running center and mass. */
    double centerx, centery, xmin, xmax, ymin, ymax, totalMass = 0;
    particleptr within = NULL; /* the single particle of a leaf, else NULL */
    int npar = 0;              /* particles inserted into this subtree */
    std::string name;          /* debug label, e.g. "0.3.1" */

    quadnode(double xmin, double xmax, double ymin, double ymax, std::string name) {
        this->xmin = xmin;
        this->xmax = xmax;
        this->ymin = ymin;
        this->ymax = ymax;
        this->name = name;
        this->centerx = this->centery = 0;
        this->child[0] = this->child[1] = this->child[2] = this->child[3] = NULL;
    }

    /* True when (x, y) lies in this cell; left/bottom edges are exclusive so
     * a point on a shared boundary belongs to exactly one sibling. */
    bool incell(double x, double y) & {
        return (x > this->xmin && x <= this->xmax) &&
               (y > this->ymin && y <= this->ymax);
    }

    /* Split this cell into four equal quadrant children. */
    void make() {
        double xhalf = (this->xmin + this->xmax) / 2.,
               yhalf = (this->ymin + this->ymax) / 2.;
        this->child[0] = new quadnode(this->xmin, xhalf, this->ymin, yhalf, this->name + ".0");
        this->child[1] = new quadnode(this->xmin, xhalf, yhalf, this->ymax, this->name + ".1");
        this->child[2] = new quadnode(xhalf, this->xmax, this->ymin, yhalf, this->name + ".2");
        this->child[3] = new quadnode(xhalf, this->xmax, yhalf, this->ymax, this->name + ".3");
    }

    /* Insert a particle (serial).  On the second insertion the leaf is split
     * and its resident particle is pushed down before routing the new one. */
    void add(const particle& par) {
        if (!this->incell(par.x, par.y)) return;
        if (this->npar > 0) {
            if (this->npar == 1) {
                this->make();
                for (int i = 0; i < 4; i++)
                    (this->child[i])->add(*(this->within));
                this->within = NULL;
            }
            for (int i = 0; i < 4; i++)
                (this->child[i])->add(par);
        } else {
            /* leaf: keep a (non-owning) pointer to the particle */
            this->within = (particleptr)&par;
        }
        /* Incremental unweighted center update.
         * NOTE(review): not mass-weighted -- confirm this matches update(). */
        this->centerx = (this->npar * this->centerx + par.x) / double(this->npar + 1);
        this->centery = (this->npar * this->centery + par.y) / double(this->npar + 1);
        this->npar++;
        this->totalMass += par.mass;
    }

    /* Split into four children; pragmas kept disabled as in the original. */
    void make_parallel() {
        double xhalf = (this->xmin + this->xmax) / 2.,
               yhalf = (this->ymin + this->ymax) / 2.;
        //#pragma omp parallel
        {
            double a[4] = { this->xmin, this->xmin, xhalf, xhalf };
            double b[4] = { xhalf, xhalf, this->xmax, this->xmax };
            double c[4] = { this->ymin, yhalf, this->ymin, yhalf };
            double d[4] = { yhalf, this->ymax, yhalf, this->ymax };
            //#pragma omp for
            for (int i = 0; i < 4; i++)
                this->child[i] = new quadnode(a[i], b[i], c[i], d[i], ""); //this->name + "." + std::to_string(i)
        }
    }

    /* Parallel insert: recurse via add_parallel while depth < DEPTH_LIM,
     * then fall back to the serial add().  Pragmas kept disabled. */
    void add_parallel(const particle& par, int depth) {
        if (!this->incell(par.x, par.y)) return;
        if (this->npar > 0) {
            if (this->npar == 1) {
                this->make_parallel();
                if (depth < DEPTH_LIM) {
                    //#pragma omp parallel
                    {
                        //#pragma omp for
                        for (int i = 0; i < 4; i++)
                            (this->child[i])->add_parallel(*(this->within), depth + 1);
                    }
                } else {
                    for (int i = 0; i < 4; i++)
                        (this->child[i])->add(*(this->within));
                }
                this->within = NULL;
            }
            if (depth < DEPTH_LIM) {
                //#pragma omp parallel
                {
                    //#pragma omp for
                    for (int i = 0; i < 4; i++)
                        (this->child[i])->add_parallel(par, depth + 1);
                }
            } else {
                for (int i = 0; i < 4; i++)
                    (this->child[i])->add(par);
            }
        }
        else {
            /* leaf: keep a (non-owning) pointer to the particle */
            this->within = (particleptr)&par;
        }
        this->centerx = (this->npar * this->centerx + par.x) / double(this->npar + 1);
        this->centery = (this->npar * this->centery + par.y) / double(this->npar + 1);
        this->npar++;
        this->totalMass += par.mass;
    }

    /* Barnes-Hut acceptance test: for internal nodes, true when the cell is
     * far enough (opening-angle criterion) to be treated as one body; for a
     * leaf, true unless the stored particle is `par` itself. */
    bool test(const particle& par) {
        if (this->child[0] != NULL) {
            double s = this->xmax - this->xmin;
            double dx = par.x - this->centerx;
            double dy = par.y - this->centery;
            double dist = sqrt(dx * dx + dy * dy);
            return dist / s > tree_thres && dist > softening;
        } else {
            return this->within != &par;
        }
    }

    /* Collect pointers to every particle in this subtree (serial). */
    std::vector<particleptr> all() {
        if (this->within != NULL) {
            std::vector<particleptr> result;
            result.push_back(this->within);
            return result;
        }
        else {
            std::vector<particleptr> result;
            for (int i = 0; i < 4; i++) if (this->child[i] != NULL) {
                std::vector<particleptr> subresult = (this->child[i])->all();
                result.insert(result.end(), subresult.begin(), subresult.end());
            }
            return result;
        }
    }

    /* Parallel variant of all(): each child subtree is gathered into its own
     * private vector, then the four vectors are merged serially.  (The
     * previous version had all threads insert into one shared vector.) */
    std::vector<particleptr> all_parallel() {
        if (this->within != NULL) {
            std::vector<particleptr> result;
            result.push_back(this->within);
            return result;
        }
        else {
            std::vector<particleptr> part[4];
            #pragma omp parallel
            {
                #pragma omp for
                for (int i = 0; i < 4; i++)
                    if (this->child[i] != NULL)
                        part[i] = (this->child[i])->all();
            }
            std::vector<particleptr> result;
            for (int i = 0; i < 4; i++)
                result.insert(result.end(), part[i].begin(), part[i].end());
            return result;
        }
    }

    /* Dump the debug labels of the whole subtree, pre-order. */
    void printtree() {
        std::cout << this->name << std::endl;
        for (int i = 0; i < 4; i++)
            if (this->child[i] != NULL) this->child[i]->printtree();
    }

    /* Accumulate this node's gravitational kick into par's velocity.
     * NOTE(review): the npar * totalMass factor and the /par.mass look
     * unusual for Newtonian gravity -- confirm against the force model. */
    void update(particle& par) & {
        double rx = this->centerx - par.x;
        double ry = this->centery - par.y;
        double r2 = softensqdist(rx * rx + ry * ry);
        par.vx += newton_g * timestep * this->npar * this->totalMass / par.mass / r2 / sqrt(r2) * rx;
        par.vy += newton_g * timestep * this->npar * this->totalMass / par.mass / r2 / sqrt(r2) * ry;
    }
};
|
main.c | /* Copyright (C) 2010 The Trustees of Indiana University. */
/* */
/* Use, modification and distribution is subject to the Boost Software */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt) */
/* */
/* Authors: Jeremiah Willcock */
/* Andrew Lumsdaine */
/* These need to be before any possible inclusions of stdint.h or inttypes.h.
* */
#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
#endif
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include "./generator/make_graph.h"
#include "./generator/utils.h"
#include "common.h"
#include <math.h>
#include <mpi.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdio.h>
#include <limits.h>
#include <stdint.h>
#include <inttypes.h>
/* qsort comparator ordering doubles ascending (NaNs compare as greatest). */
static int compare_doubles(const void* a, const void* b) {
  const double lhs = *(const double*)a;
  const double rhs = *(const double*)b;
  if (lhs < rhs) return -1;
  return (lhs == rhs) ? 0 : 1;
}
enum {s_minimum, s_firstquartile, s_median, s_thirdquartile, s_maximum, s_mean, s_std, s_LAST};
/*
 * Fill r[] with summary statistics of x[0..n-1]: min, quartiles, median,
 * max, mean and sample standard deviation (indexed by the s_* enum).
 * Requires n >= 1.  Fix: the sample variance divides by n - 1, which was a
 * division by zero for n == 1; the std. dev. is now reported as 0 there.
 */
static void get_statistics(const double x[], int n, double r[s_LAST]) {
  double temp;
  int i;
  /* Compute mean. */
  temp = 0;
  for (i = 0; i < n; ++i) temp += x[i];
  temp /= n;
  r[s_mean] = temp;
  /* Compute sample std. dev.; undefined for n < 2, report 0 instead. */
  if (n > 1) {
    temp = 0;
    for (i = 0; i < n; ++i) temp += (x[i] - r[s_mean]) * (x[i] - r[s_mean]);
    temp /= n - 1;
    r[s_std] = sqrt(temp);
  } else {
    r[s_std] = 0;
  }
  /* Sort a scratch copy of x (x itself stays const). */
  double* xx = (double*)xmalloc(n * sizeof(double));
  memcpy(xx, x, n * sizeof(double));
  qsort(xx, n, sizeof(double), compare_doubles);
  /* Get order statistics (quartiles average the two straddling elements). */
  r[s_minimum] = xx[0];
  r[s_firstquartile] = (xx[(n - 1) / 4] + xx[n / 4]) * .5;
  r[s_median] = (xx[(n - 1) / 2] + xx[n / 2]) * .5;
  r[s_thirdquartile] = (xx[n - 1 - (n - 1) / 4] + xx[n - 1 - n / 4]) * .5;
  r[s_maximum] = xx[n - 1];
  /* Clean up. */
  free(xx);
}
int main(int argc, char** argv) {
MPI_Init(&argc, &argv);
setup_globals();
/* Parse arguments. */
int SCALE = 16;
int edgefactor = 16; /* nedges / nvertices, i.e., 2*avg. degree */
if (argc >= 2) SCALE = atoi(argv[1]);
if (argc >= 3) edgefactor = atoi(argv[2]);
if (argc <= 1 || argc >= 4 || SCALE == 0 || edgefactor == 0) {
if (rank == 0) {
fprintf(stderr, "Usage: %s SCALE edgefactor\n SCALE = log_2(# vertices) [integer, required]\n edgefactor = (# edges) / (# vertices) = .5 * (average vertex degree) [integer, defaults to 16]\n(Random number seed and Kronecker initiator are in main.c)\n", argv[0]);
}
MPI_Abort(MPI_COMM_WORLD, 1);
}
uint64_t seed1 = 2, seed2 = 3;
const char* filename = getenv("TMPFILE");
/* If filename is NULL, store data in memory */
tuple_graph tg;
tg.nglobaledges = (int64_t)(edgefactor) << SCALE;
int64_t nglobalverts = (int64_t)(1) << SCALE;
tg.data_in_file = (filename != NULL);
if (tg.data_in_file) {
MPI_File_set_errhandler(MPI_FILE_NULL, MPI_ERRORS_ARE_FATAL);
MPI_File_open(MPI_COMM_WORLD, (char*)filename, MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_EXCL | MPI_MODE_DELETE_ON_CLOSE | MPI_MODE_UNIQUE_OPEN, MPI_INFO_NULL, &tg.edgefile);
MPI_File_set_size(tg.edgefile, tg.nglobaledges * sizeof(packed_edge));
MPI_File_set_view(tg.edgefile, 0, packed_edge_mpi_type, packed_edge_mpi_type, "native", MPI_INFO_NULL);
MPI_File_set_atomicity(tg.edgefile, 0);
}
/* Make the raw graph edges. */
/* Get roots for BFS runs, plus maximum vertex with non-zero degree (used by
* validator). */
int num_bfs_roots = 64;
int64_t* bfs_roots = (int64_t*)xmalloc(num_bfs_roots * sizeof(int64_t));
int64_t max_used_vertex = 0;
double make_graph_start = MPI_Wtime();
{
/* Spread the two 64-bit numbers into five nonzero values in the correct
* range. */
uint_fast32_t seed[5];
make_mrg_seed(seed1, seed2, seed);
/* As the graph is being generated, also keep a bitmap of vertices with
* incident edges. We keep a grid of processes, each row of which has a
* separate copy of the bitmap (distributed among the processes in the
* row), and then do an allreduce at the end. This scheme is used to avoid
* non-local communication and reading the file separately just to find BFS
* roots. */
MPI_Offset nchunks_in_file = (tg.nglobaledges + FILE_CHUNKSIZE - 1) / FILE_CHUNKSIZE;
int64_t bitmap_size_in_bytes = int64_min(BITMAPSIZE, (nglobalverts + CHAR_BIT - 1) / CHAR_BIT);
if (bitmap_size_in_bytes * size * CHAR_BIT < nglobalverts) {
bitmap_size_in_bytes = (nglobalverts + size * CHAR_BIT - 1) / (size * CHAR_BIT);
}
int ranks_per_row = ((nglobalverts + CHAR_BIT - 1) / CHAR_BIT + bitmap_size_in_bytes - 1) / bitmap_size_in_bytes;
int nrows = size / ranks_per_row;
int my_row = -1, my_col = -1;
unsigned char* restrict has_edge = NULL;
MPI_Comm cart_comm;
{
int dims[2] = {size / ranks_per_row, ranks_per_row};
int periods[2] = {0, 0};
MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &cart_comm);
}
int in_generating_rectangle = 0;
if (cart_comm != MPI_COMM_NULL) {
in_generating_rectangle = 1;
{
int dims[2], periods[2], coords[2];
MPI_Cart_get(cart_comm, 2, dims, periods, coords);
my_row = coords[0];
my_col = coords[1];
}
MPI_Comm this_col;
MPI_Comm_split(cart_comm, my_col, my_row, &this_col);
MPI_Comm_free(&cart_comm);
has_edge = (unsigned char*)xMPI_Alloc_mem(bitmap_size_in_bytes);
memset(has_edge, 0, bitmap_size_in_bytes);
/* Every rank in a given row creates the same vertices (for updating the
* bitmap); only one writes them to the file (or final memory buffer). */
packed_edge* buf = (packed_edge*)xmalloc(FILE_CHUNKSIZE * sizeof(packed_edge));
MPI_Offset block_limit = (nchunks_in_file + nrows - 1) / nrows;
/* fprintf(stderr, "%d: nchunks_in_file = %" PRId64 ", block_limit = %" PRId64 " in grid of %d rows, %d cols\n", rank, (int64_t)nchunks_in_file, (int64_t)block_limit, nrows, ranks_per_row); */
if (tg.data_in_file) {
tg.edgememory_size = 0;
tg.edgememory = NULL;
} else {
int my_pos = my_row + my_col * nrows;
int last_pos = (tg.nglobaledges % ((int64_t)FILE_CHUNKSIZE * nrows * ranks_per_row) != 0) ?
(tg.nglobaledges / FILE_CHUNKSIZE) % (nrows * ranks_per_row) :
-1;
int64_t edges_left = tg.nglobaledges % FILE_CHUNKSIZE;
int64_t nedges = FILE_CHUNKSIZE * (tg.nglobaledges / ((int64_t)FILE_CHUNKSIZE * nrows * ranks_per_row)) +
FILE_CHUNKSIZE * (my_pos < (tg.nglobaledges / FILE_CHUNKSIZE) % (nrows * ranks_per_row)) +
(my_pos == last_pos ? edges_left : 0);
/* fprintf(stderr, "%d: nedges = %" PRId64 " of %" PRId64 "\n", rank, (int64_t)nedges, (int64_t)tg.nglobaledges); */
tg.edgememory_size = nedges;
tg.edgememory = (packed_edge*)xmalloc(nedges * sizeof(packed_edge));
}
MPI_Offset block_idx;
for (block_idx = 0; block_idx < block_limit; ++block_idx) {
/* fprintf(stderr, "%d: On block %d of %d\n", rank, (int)block_idx, (int)block_limit); */
MPI_Offset start_edge_index = int64_min(FILE_CHUNKSIZE * (block_idx * nrows + my_row), tg.nglobaledges);
MPI_Offset edge_count = int64_min(tg.nglobaledges - start_edge_index, FILE_CHUNKSIZE);
packed_edge* actual_buf = (!tg.data_in_file && block_idx % ranks_per_row == my_col) ?
tg.edgememory + FILE_CHUNKSIZE * (block_idx / ranks_per_row) :
buf;
/* fprintf(stderr, "%d: My range is [%" PRId64 ", %" PRId64 ") %swriting into index %" PRId64 "\n", rank, (int64_t)start_edge_index, (int64_t)(start_edge_index + edge_count), (my_col == (block_idx % ranks_per_row)) ? "" : "not ", (int64_t)(FILE_CHUNKSIZE * (block_idx / ranks_per_row))); */
if (!tg.data_in_file && block_idx % ranks_per_row == my_col) {
assert (FILE_CHUNKSIZE * (block_idx / ranks_per_row) + edge_count <= tg.edgememory_size);
}
generate_kronecker_range(seed, SCALE, start_edge_index, start_edge_index + edge_count, actual_buf);
if (tg.data_in_file && my_col == (block_idx % ranks_per_row)) { /* Try to spread writes among ranks */
MPI_File_write_at(tg.edgefile, start_edge_index, actual_buf, edge_count, packed_edge_mpi_type, MPI_STATUS_IGNORE);
}
ptrdiff_t i;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (i = 0; i < edge_count; ++i) {
int64_t src = get_v0_from_edge(&actual_buf[i]);
int64_t tgt = get_v1_from_edge(&actual_buf[i]);
if (src == tgt) continue;
if (src / bitmap_size_in_bytes / CHAR_BIT == my_col) {
#ifdef _OPENMP
#pragma omp atomic
#endif
has_edge[(src / CHAR_BIT) % bitmap_size_in_bytes] |= (1 << (src % CHAR_BIT));
}
if (tgt / bitmap_size_in_bytes / CHAR_BIT == my_col) {
#ifdef _OPENMP
#pragma omp atomic
#endif
has_edge[(tgt / CHAR_BIT) % bitmap_size_in_bytes] |= (1 << (tgt % CHAR_BIT));
}
}
}
free(buf);
#if 0
/* The allreduce for each root acts like we did this: */
MPI_Allreduce(MPI_IN_PLACE, has_edge, bitmap_size_in_bytes, MPI_UNSIGNED_CHAR, MPI_BOR, this_col);
#endif
MPI_Comm_free(&this_col);
} else {
tg.edgememory = NULL;
tg.edgememory_size = 0;
}
MPI_Allreduce(&tg.edgememory_size, &tg.max_edgememory_size, 1, MPI_INT64_T, MPI_MAX, MPI_COMM_WORLD);
/* Find roots and max used vertex */
{
uint64_t counter = 0;
int bfs_root_idx;
for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) {
int64_t root;
while (1) {
double d[2];
make_random_numbers(2, seed1, seed2, counter, d);
root = (int64_t)((d[0] + d[1]) * nglobalverts) % nglobalverts;
counter += 2;
if (counter > 2 * nglobalverts) break;
int is_duplicate = 0;
int i;
for (i = 0; i < bfs_root_idx; ++i) {
if (root == bfs_roots[i]) {
is_duplicate = 1;
break;
}
}
if (is_duplicate) continue; /* Everyone takes the same path here */
int root_ok = 0;
if (in_generating_rectangle && (root / CHAR_BIT / bitmap_size_in_bytes) == my_col) {
root_ok = (has_edge[(root / CHAR_BIT) % bitmap_size_in_bytes] & (1 << (root % CHAR_BIT))) != 0;
}
MPI_Allreduce(MPI_IN_PLACE, &root_ok, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD);
if (root_ok) break;
}
bfs_roots[bfs_root_idx] = root;
}
num_bfs_roots = bfs_root_idx;
/* Find maximum non-zero-degree vertex. */
{
int64_t i;
max_used_vertex = 0;
if (in_generating_rectangle) {
for (i = bitmap_size_in_bytes * CHAR_BIT; i > 0; --i) {
if (i > nglobalverts) continue;
if (has_edge[(i - 1) / CHAR_BIT] & (1 << ((i - 1) % CHAR_BIT))) {
max_used_vertex = (i - 1) + my_col * CHAR_BIT * bitmap_size_in_bytes;
break;
}
}
}
MPI_Allreduce(MPI_IN_PLACE, &max_used_vertex, 1, MPI_INT64_T, MPI_MAX, MPI_COMM_WORLD);
}
}
if (in_generating_rectangle) {
MPI_Free_mem(has_edge);
}
if (tg.data_in_file) {
MPI_File_sync(tg.edgefile);
}
}
double make_graph_stop = MPI_Wtime();
double make_graph_time = make_graph_stop - make_graph_start;
if (rank == 0) { /* Not an official part of the results */
fprintf(stderr, "graph_generation: %f s\n", make_graph_time);
}
/* Make user's graph data structure. */
double data_struct_start = MPI_Wtime();
make_graph_data_structure(&tg);
double data_struct_stop = MPI_Wtime();
double data_struct_time = data_struct_stop - data_struct_start;
if (rank == 0) { /* Not an official part of the results */
fprintf(stderr, "construction_time: %f s\n", data_struct_time);
}
/* Number of edges visited in each BFS; a double so get_statistics can be
* used directly. */
double* edge_counts = (double*)xmalloc(num_bfs_roots * sizeof(double));
/* Run BFS. */
int validation_passed = 1;
double* bfs_times = (double*)xmalloc(num_bfs_roots * sizeof(double));
double* validate_times = (double*)xmalloc(num_bfs_roots * sizeof(double));
uint64_t nlocalverts = get_nlocalverts_for_pred();
int64_t* pred = (int64_t*)xMPI_Alloc_mem(nlocalverts * sizeof(int64_t));
int bfs_root_idx;
for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) {
int64_t root = bfs_roots[bfs_root_idx];
if (rank == 0) fprintf(stderr, "Running BFS %d\n", bfs_root_idx);
/* Clear the pred array. */
memset(pred, 0, nlocalverts * sizeof(int64_t));
/* Do the actual BFS. */
double bfs_start = MPI_Wtime();
run_bfs(root, &pred[0],0);
double bfs_stop = MPI_Wtime();
bfs_times[bfs_root_idx] = bfs_stop - bfs_start;
if (rank == 0) fprintf(stderr, "Time for BFS %d is %f\n", bfs_root_idx, bfs_times[bfs_root_idx]);
/* Validate result. */
if (rank == 0) fprintf(stderr, "Validating BFS %d\n", bfs_root_idx);
double validate_start = MPI_Wtime();
int64_t edge_visit_count;
int validation_passed_one = validate_bfs_result(&tg, max_used_vertex + 1, nlocalverts, root, pred, &edge_visit_count);
double validate_stop = MPI_Wtime();
validate_times[bfs_root_idx] = validate_stop - validate_start;
if (rank == 0) fprintf(stderr, "Validate time for BFS %d is %f\n", bfs_root_idx, validate_times[bfs_root_idx]);
edge_counts[bfs_root_idx] = (double)edge_visit_count;
if (rank == 0) fprintf(stderr, "TEPS for BFS %d is %g\n", bfs_root_idx, edge_visit_count / bfs_times[bfs_root_idx]);
if (!validation_passed_one) {
validation_passed = 0;
if (rank == 0) fprintf(stderr, "Validation failed for this BFS root; skipping rest.\n");
break;
}
}
MPI_Free_mem(pred);
free(bfs_roots);
free_graph_data_structure();
if (tg.data_in_file) {
MPI_File_close(&tg.edgefile);
} else {
free(tg.edgememory); tg.edgememory = NULL;
}
/* Print results. */
if (rank == 0) {
if (!validation_passed) {
fprintf(stdout, "No results printed for invalid run.\n");
} else {
int i;
fprintf(stdout, "SCALE: %d\n", SCALE);
fprintf(stdout, "edgefactor: %d\n", edgefactor);
fprintf(stdout, "NBFS: %d\n", num_bfs_roots);
fprintf(stdout, "graph_generation: %g\n", make_graph_time);
fprintf(stdout, "num_mpi_processes: %d\n", size);
fprintf(stdout, "construction_time: %g\n", data_struct_time);
double stats[s_LAST];
get_statistics(bfs_times, num_bfs_roots, stats);
fprintf(stdout, "min_time: %g\n", stats[s_minimum]);
fprintf(stdout, "firstquartile_time: %g\n", stats[s_firstquartile]);
fprintf(stdout, "median_time: %g\n", stats[s_median]);
fprintf(stdout, "thirdquartile_time: %g\n", stats[s_thirdquartile]);
fprintf(stdout, "max_time: %g\n", stats[s_maximum]);
fprintf(stdout, "mean_time: %g\n", stats[s_mean]);
fprintf(stdout, "stddev_time: %g\n", stats[s_std]);
get_statistics(edge_counts, num_bfs_roots, stats);
fprintf(stdout, "min_nedge: %.11g\n", stats[s_minimum]);
fprintf(stdout, "firstquartile_nedge: %.11g\n", stats[s_firstquartile]);
fprintf(stdout, "median_nedge: %.11g\n", stats[s_median]);
fprintf(stdout, "thirdquartile_nedge: %.11g\n", stats[s_thirdquartile]);
fprintf(stdout, "max_nedge: %.11g\n", stats[s_maximum]);
fprintf(stdout, "mean_nedge: %.11g\n", stats[s_mean]);
fprintf(stdout, "stddev_nedge: %.11g\n", stats[s_std]);
double* secs_per_edge = (double*)xmalloc(num_bfs_roots * sizeof(double));
for (i = 0; i < num_bfs_roots; ++i) secs_per_edge[i] = bfs_times[i] / edge_counts[i];
get_statistics(secs_per_edge, num_bfs_roots, stats);
fprintf(stdout, "min_TEPS: %g\n", 1. / stats[s_maximum]);
fprintf(stdout, "firstquartile_TEPS: %g\n", 1. / stats[s_thirdquartile]);
fprintf(stdout, "median_TEPS: %g\n", 1. / stats[s_median]);
fprintf(stdout, "thirdquartile_TEPS: %g\n", 1. / stats[s_firstquartile]);
fprintf(stdout, "max_TEPS: %g\n", 1. / stats[s_minimum]);
fprintf(stdout, "harmonic_mean_TEPS: %g\n", 1. / stats[s_mean]);
/* Formula from:
* Title: The Standard Errors of the Geometric and Harmonic Means and
* Their Application to Index Numbers
* Author(s): Nilan Norris
* Source: The Annals of Mathematical Statistics, Vol. 11, No. 4 (Dec., 1940), pp. 445-448
* Publisher(s): Institute of Mathematical Statistics
* Stable URL: http://www.jstor.org/stable/2235723
* (same source as in specification). */
fprintf(stdout, "harmonic_stddev_TEPS: %g\n", stats[s_std] / (stats[s_mean] * stats[s_mean] * sqrt(num_bfs_roots - 1)));
free(secs_per_edge); secs_per_edge = NULL;
free(edge_counts); edge_counts = NULL;
get_statistics(validate_times, num_bfs_roots, stats);
fprintf(stdout, "min_validate: %g\n", stats[s_minimum]);
fprintf(stdout, "firstquartile_validate: %g\n", stats[s_firstquartile]);
fprintf(stdout, "median_validate: %g\n", stats[s_median]);
fprintf(stdout, "thirdquartile_validate: %g\n", stats[s_thirdquartile]);
fprintf(stdout, "max_validate: %g\n", stats[s_maximum]);
fprintf(stdout, "mean_validate: %g\n", stats[s_mean]);
fprintf(stdout, "stddev_validate: %g\n", stats[s_std]);
#if 0
for (i = 0; i < num_bfs_roots; ++i) {
fprintf(stdout, "Run %3d: %g s, validation %g s\n", i + 1, bfs_times[i], validate_times[i]);
}
#endif
}
}
free(bfs_times);
free(validate_times);
cleanup_globals();
MPI_Finalize();
return 0;
}
|
diagsm_x_sky_u_col.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
    /* Unit-diagonal skyline triangular solve, column-major dense operands:
     * with an implicit unit diagonal the solve degenerates to the
     * elementwise update y = alpha * x.
     * NOTE(review): A is only consulted for its row count here. */
    ALPHA_INT num_thread = alpha_get_thread_num();

#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT col = 0; col < columns; ++col)
    {
        for (ALPHA_INT row = 0; row < A->rows; ++row)
        {
            alpha_mul(y[index2(col, row, ldy)], alpha, x[index2(col, row, ldx)]);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
parallel_for.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file parallel_for.h
* \brief An implementation to run loop in parallel.
*/
#ifndef TVM_SUPPORT_PARALLEL_FOR_H_
#define TVM_SUPPORT_PARALLEL_FOR_H_
#include <tvm/runtime/c_runtime_api.h>
#include <functional>
#include <vector>
namespace tvm {
namespace support {
using PartitionerFuncType = std::function<std::vector<std::vector<int>>(int, int, int, int)>;
/*!
* \brief A partitioner to split the task to each thread in Round-robin manner.
* \param begin The start index of this parallel loop(inclusive).
* \param end The end index of this parallel loop(exclusive).
* \param step The traversal step to the index.
* \param num_threads The number of threads(the number of tasks to be partitioned to).
* \return A list with `num_threads` elements, and each is a list of integers indicating the loop
* indexes for the corresponding thread to process.
*/
TVM_DLL std::vector<std::vector<int>> rr_partitioner(int begin, int end, int step, int num_threads);
/*!
* \brief A runtime api provided to run the task function in parallel.
* e.g. A for loop:
* for (int i = 0; i < 10; i++) {
* a[i] = i;
* }
* should work the same as:
* parallel_for(0, 10, [&a](int index) {
* a[index] = index;
* });
* \param begin The start index of this parallel loop(inclusive).
* \param end The end index of this parallel loop(exclusive).
* \param f The task function to be executed. Assert to take an int index as input with no output.
* \param step The traversal step to the index.
* \param partitioner A partition function to split tasks to different threads. Use Round-robin
* partitioner by default.
* \note 1. Currently do not support nested parallel_for; 2. The order of execution in each thread
* is not guaranteed, the for loop task should be thread independent and thread safe.
*/
TVM_DLL void parallel_for(int begin, int end, const std::function<void(int)>& f, int step = 1,
const PartitionerFuncType partitioner = rr_partitioner);
/*!
* \brief An API to launch fix amount of threads to run the specific functor in parallel.
* Different from `parallel_for`, the partition is determined dynamically on the fly,
* i.e. any time when a thread is idle, it fetches the next task to run.
* The behavior is similar to dynamic scheduling in OpenMP:
*
* \#pragma omp parallel for schedule(dynamic) num_threads(num_threads)
* for (int i = 0; i < 10; i++) {
* a[i] = i;
* }
*
* \param begin The start index of this parallel loop (inclusive).
* \param end The end index of this parallel loop (exclusive).
* \param num_threads The number of threads to be used.
* \param f The task function to be executed. Takes the thread index and the task index as
* input with no output.
* \note `step` support is left for future work.
*/
TVM_DLL void parallel_for_dynamic(int begin, int end, int num_threads,
const std::function<void(int thread_id, int task_id)>& f);
} // namespace support
} // namespace tvm
#endif // TVM_SUPPORT_PARALLEL_FOR_H_
|
test.c | #include <stdio.h>
#include "../utilities/check.h"
#define N 100
// Check the "aligned" clause on "omp target simd": the mapped pointer is
// asserted to be 8*sizeof(int)-byte aligned inside the offloaded loop.
int test_aligned(){
  int a[N], aa[N];
  int i, error = 0;

  // fill device-result and host-reference arrays with a sentinel
  for (i = 0; i < N; i++)
    aa[i] = a[i] = -1;

  int *vec = a;

  // offload: write k through the aligned pointer
  #pragma omp target simd map(tofrom: vec[0:100]) aligned(vec: 8*sizeof(int))
  for (int k = 0; k < N; k++)
    vec[k] = k;

  // host reference
  for (i = 0; i < N; i++)
    aa[i] = i;

  // compare, bailing out after 10 mismatches
  for (i = 0; i < N; i++) {
    if (a[i] != aa[i])
      printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
    if (error > 10) {
      printf("abort\n");
      return error;
    }
  }
  return error;
}
// Check "collapse(2)": the two perfectly nested loops form one collapsed
// iteration space whose indices k*4+l enumerate 0..N-1.
int test_collapsed(){
  int a[N], aa[N];
  int i, error = 0;

  // sentinel-fill device-result and host-reference arrays
  for (i = 0; i < N; i++)
    aa[i] = a[i] = -1;

  // offload
  #pragma omp target simd map(tofrom: a[0:100]) collapse(2)
  for (int k = 0; k < N/4; k++)
    for (int l = 0; l < 4; l++)
      a[k*4 + l] = k*4 + l;

  // host reference
  for (i = 0; i < N; i++)
    aa[i] = i;

  // compare, bailing out after 10 mismatches
  for (i = 0; i < N; i++) {
    if (a[i] != aa[i])
      printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
    if (error > 10) {
      printf("abort\n");
      return error;
    }
  }
  return error;
}
#if 0
// Check the "lastprivate" clause: after the offloaded loop, n should hold
// the value written by the sequentially-last iteration (N-1).
// NOTE(review): disabled — wrapped in #if 0 here and commented out in main().
int test_lastprivate(){
// TODO
int a[N], aa[N];
int i, error = 0;
// initialize
for(i=0; i<N; i++)
aa[i] = a[i] = -1;
int n;
// offload
#pragma omp target simd map(tofrom: a[0:100]) lastprivate(n)
for(int k=0; k<N; k++) {
a[k] = k;
n = k;
}
printf(" n = %d\n", n);
// a[0] doubles as the probe for the lastprivate value
a[0] = n;
// host
for(i=0; i<N; i++)
aa[i] = i;
aa[0] = N-1;
// check
for(i=0; i<N; i++) {
if (a[i] != aa[i])
printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
if (error > 10) {
printf("abort\n");
return error;
}
}
return error;
}
#endif
// Check the "linear" clause: l advances by 2 per iteration of the
// offloaded loop, so a[k] == 2*k afterwards. Returns the mismatch count.
int test_linear(){
  int a[N], aa[N];
  int i, error = 0;

  // initialize
  for (i = 0; i < N; i++)
    aa[i] = a[i] = -1;

  int l = 0;

  // offload
  #pragma omp target simd map(tofrom: a[0:100]) linear(l: 2)
  for (int k = 0; k < N; k++) {
    l = 2*k;
    a[k] = l;
  }

  // host reference
  for (i = 0; i < N; i++)
    aa[i] = 2*i;

  // check, bailing out after 10 mismatches
  for (i = 0; i < N; i++) {
    if (a[i] != aa[i])
      printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
    if (error > 10) {
      printf("abort\n");
      return error; /* fix: dropped stray empty statement ("return error;;") */
    }
  }
  return error;
}
// Check the "private" clause on target simd: each lane owns its copy of tmp.
int test_private(){
  int a[N], aa[N];
  int i, error = 0;

  // sentinel-fill both arrays
  for (i = 0; i < N; i++)
    aa[i] = a[i] = -1;

  int tmp;

  // offload: tmp is privatized per SIMD lane
  #pragma omp target simd map(tofrom: a[0:100]) private(tmp)
  for (int k = 0; k < N; k++) {
    tmp = k;
    a[k] = tmp;
  }

  // host reference
  for (i = 0; i < N; i++)
    aa[i] = i;

  // compare, bailing out after 10 mismatches
  for (i = 0; i < N; i++) {
    if (a[i] != aa[i])
      printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
    if (error > 10) {
      printf("abort\n");
      return error;
    }
  }
  return error;
}
// Check "safelen(2)": the loop carries a dependence of distance 2
// (a[k] reads a[k-2]), so SIMD width must not exceed 2 lanes.
int test_safelen(){
  int a[N], aa[N];
  int i, error = 0;

  // sentinel-fill both arrays
  for (i = 0; i < N; i++)
    aa[i] = a[i] = -1;

  // offload: a[k] = k for all k, expressed via the distance-2 recurrence
  #pragma omp target simd map(tofrom: a[0:100]) safelen(2)
  for (int k = 0; k < 100; k++)
    a[k] = (k > 1) ? a[k-2] + 2 : k;

  // host reference
  for (i = 0; i < N; i++)
    aa[i] = i;

  // compare, bailing out after 10 mismatches
  for (i = 0; i < N; i++) {
    if (a[i] != aa[i])
      printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
    if (error > 10) {
      printf("abort\n");
      return error;
    }
  }
  return error;
}
// Driver: run every clause test and report the accumulated error count.
int main()
{
  int failures = 0;

  check_offloading();

  // Clauses under test
  failures += test_aligned();
  failures += test_collapsed();
  //failures += test_lastprivate(); // TODO: disabled (see #if 0 block)
  failures += test_linear();
  failures += test_private();
  failures += test_safelen();

  // report
  printf("done with %d errors\n", failures);
  return failures;
}
|
DMD5_fmt_plug.c | /*
* DMD5_fmt.c
*
* DIGEST-MD5 authentication module for Solar Designer's John the Ripper
* Uses Solar Designer's MD5 implementation.
*
* This software is Copyright 2006, regenrecht@o2.pl, and
* Copyright 2011, 2013 magnum, and it is hereby released to the general
* public under the following terms: Redistribution and use in source and
* binary forms, with or without modification, are permitted.
*
* Input format:
* $DIGEST-MD5$ username $ realm $ nonce $ digest_uri $ cnonce $ nc $ qop $ response [ $ authzid ]
*
* Just base64-decode the blob you see when sniffing, to get all data needed
* for above.
*
* See https://tools.ietf.org/html/rfc2831 (Using Digest Authentication as a
* SASL Mechanism) for algorithm details.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_DMD5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_DMD5);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "md5.h"
#include "common.h"
#include "formats.h"
#include "memdbg.h"
#define FORMAT_LABEL "dmd5"
#define FORMAT_NAME "DIGEST-MD5 C/R"
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define FORMAT_TAG "$DIGEST-MD5$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define MD5_HEX_SIZE (2 * BINARY_SIZE)
#define BINARY_SIZE 16
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#define DSIZE (128 - sizeof(int))
#define CIPHERTEXT_LENGTH (DSIZE * 4)
#define PLAINTEXT_LENGTH 32
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static const char itoa16_shr_04[] =
"0000000000000000"
"1111111111111111"
"2222222222222222"
"3333333333333333"
"4444444444444444"
"5555555555555555"
"6666666666666666"
"7777777777777777"
"8888888888888888"
"9999999999999999"
"aaaaaaaaaaaaaaaa"
"bbbbbbbbbbbbbbbb"
"cccccccccccccccc"
"dddddddddddddddd"
"eeeeeeeeeeeeeeee"
"ffffffffffffffff";
static const char itoa16_and_0f[] =
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef";
/* Per-hash salt: the three fixed byte strings that bracket the password
 * hash in the DIGEST-MD5 computation performed by crypt_all(). */
static struct custom_salt {
unsigned char login_id[DSIZE]; // username:realm
unsigned int login_id_len;
unsigned char nonces[DSIZE]; // :nonce:cnonce[:authzid]
unsigned int nonces_len;
unsigned char prehash_KD[DSIZE]; // :nonce:nc:cnonce:qop:hex_A2_hash
unsigned int prehash_KD_len;
} *cur_salt;
// Per-candidate output digests and plaintexts; allocated in init().
static uint32_t (*crypt_key)[BINARY_SIZE/4];
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
// Self-test vectors; the last two come from RFC 2831 section 8.
static struct fmt_tests tests[] = {
{"$DIGEST-MD5$s3443$pjwstk$00$ldap/10.253.34.43$0734d94ad9abd5bd7fc5e7e77bcf49a8$00000001$auth-int$dd98347e6da3efd6c4ff2263a729ef77", "test"},
// Two hashes from https://tools.ietf.org/html/rfc2831#section-8
{"$DIGEST-MD5$chris$elwood.innosoft.com$OA6MG9tEQGm2hh$imap/elwood.innosoft.com$OA6MHXh6VqTrRk$00000001$auth$d388dad90d4bbd760a152321f2143af7", "secret"},
{"$DIGEST-MD5$chris$elwood.innosoft.com$OA9BSXrbuRhWay$acap/elwood.innosoft.com$OA9BSuZWMSpW8m$00000001$auth$6084c6db3fede7352c551284490fd0fc", "secret"},
{NULL}
};
/* Scale key-buffer sizes for OpenMP and allocate per-candidate storage. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();
	/* min keys scale with the raw thread count; max additionally by OMP_SCALE */
	self->params.min_keys_per_crypt *= threads;
	threads *= OMP_SCALE;
	self->params.max_keys_per_crypt *= threads;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       PLAINTEXT_LENGTH + 1);
	crypt_key = mem_calloc(self->params.max_keys_per_crypt,
	                       BINARY_SIZE);
}
/* Release the buffers allocated in init(). */
static void done(void)
{
MEM_FREE(crypt_key);
MEM_FREE(saved_key);
}
/*
 * Sanity-check one ciphertext line against the input format documented in
 * the file header:
 *   $DIGEST-MD5$user$realm$nonce$digest_uri$cnonce$nc$qop$response[$authzid]
 * Field lengths are bounded here so get_salt()'s fixed-size buffers cannot
 * be overrun. Returns 1 if parseable, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *data = ciphertext + FORMAT_TAG_LEN;
int extra;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
return 0;
if (strlen(ciphertext) > CIPHERTEXT_LENGTH)
return 0;
if (!(p = strchr(data, '$')) || (int)(p-data) >= 64) // username
return 0;
data = p + 1; // realm
if (!(p = strchr(data, '$')) || (int)(p-data) >= 64)
return 0;
data = p + 1; // nonce
if (!(p = strchr(data, '$')) || (int)(p-data) >= 64)
return 0;
data = p + 1; // digest_uri
if (!(p = strchr(data, '$')) || (int)(p-data) >= DSIZE)
return 0;
data = p + 1; // cnonce
if (!(p = strchr(data, '$')) || (int)(p-data) > MD5_HEX_SIZE)
return 0;
/* if (hexlenl(data, 0) != p-data) // this is not always hex data!
return 0; */
data = p + 1; // nc
if (!(p = strchr(data, '$')) || (int)(p-data) >= 9)
return 0;
data = p + 1; // qop
if (strncmp(data, "auth", 4) && strncmp(data, "auth-int", 8) &&
strncmp(data, "auth-conf", 9))
return 0;
if (!(p = strchr(data, '$')) || (int)(p-data) >= 9)
return 0;
data = p + 1; // authzid, optional
if ((p = strchr(data, '$'))) {
if ((int)(p-data) > MD5_HEX_SIZE || strlen(&p[1]) >= 8)
return 0;
} else if (strlen(data) > MD5_HEX_SIZE)
return 0;
// the response itself must be exactly 32 hex digits
if (hexlenl(data, &extra) != MD5_HEX_SIZE || extra)
return 0;
return 1;
}
/*
 * Decode the 32-hex-digit response field (the 8th '$'-separated field)
 * into a static 16-byte binary buffer. Input has passed valid().
 */
static void *get_binary(char *ciphertext)
{
	static uint32_t out[BINARY_SIZE/4];
	char response[MD5_HEX_SIZE + 1];
	unsigned int i;
	char *p, *data = ciphertext + FORMAT_TAG_LEN;

	/* Skip the first seven fields; the response hash follows. */
	for (i = 0; i < 7; ++i) {
		p = strchr(data, '$');
		data = p + 1;
	}
	/* The optional authzid may trail the response. */
	p = strchr(data, '$');
	if (p && (p - data + 1) < sizeof(response))
		strnzcpy(response, data, p - data + 1);
	else
		strnzcpy(response, data, sizeof(response));

	/* hex -> bytes */
	for (i = 0; i < BINARY_SIZE; ++i)
		((unsigned char*)out)[i] =
			(atoi16[ARCH_INDEX(response[i*2])] << 4)
			+ atoi16[ARCH_INDEX(response[i*2+1])];
	return (void*)out;
}
/*
 * Parse one valid()-checked ciphertext into the static custom_salt:
 * split the '$'-separated fields, precompute hex(MD5(A2)) and store the
 * three constant strings that surround the password hash in crypt_all().
 * Returns a pointer to static storage (the caller copies SALT_SIZE bytes).
 */
static void *get_salt(char *ciphertext)
{
	char username[64];
	char realm[64];
	char nonce[64];
	char digest_uri[DSIZE];
	char cnonce[MD5_HEX_SIZE + 1];
	char nc[9];
	char qop[9];
	char authzid[8];
	unsigned char *ptr_src, *ptr_dst, v, i;
	char *ccopy = strdup(ciphertext);
	char *p, *data = ccopy + FORMAT_TAG_LEN;
	MD5_CTX ctx;
	char A2[DSIZE];
	unsigned char hash[BINARY_SIZE];
	unsigned char hex_hash[2*MD5_HEX_SIZE];
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));

	/* valid() guarantees every '$' below exists, so p is non-NULL each
	   time "data = p + 1" is computed. */
	if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(username, data, sizeof(username));
	data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(realm, data, sizeof(realm));
	data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(nonce, data, sizeof(nonce));
	data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(digest_uri, data, sizeof(digest_uri));
	data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(cnonce, data, sizeof(cnonce));
	data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(nc, data, sizeof(nc));
	data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(qop, data, sizeof(qop));
	data = p + 1;
	if ((p = strchr(data, '$'))) {
		*p = 0;
		data = p + 1;
		if (*data)
			strnzcpy(authzid, data, sizeof(authzid));
		else
			*authzid = 0;
	} else {
		*authzid = 0;
	}

	/* A2 per RFC 2831 section 2.1.2.1; the 32-zero block is appended for
	   qop values with integrity/confidentiality protection. */
	A2[0] = '\0'; /* fix: never hash uninitialized memory if qop is unexpected */
	if (!strcmp(qop, "auth"))
		snprintf((char*)A2, sizeof(A2),
		         "AUTHENTICATE:%s", digest_uri);
	else if (!strcmp(qop, "auth-int") || !strcmp(qop, "auth-conf"))
		snprintf((char*)A2, sizeof(A2),
		         "AUTHENTICATE:%s:00000000000000000000000000000000",
		         digest_uri);

	MD5_Init(&ctx);
	MD5_Update(&ctx, A2, strlen((char*)A2));
	MD5_Final(hash, &ctx);

	/* hex-encode MD5(A2) */
	ptr_src = hash;
	ptr_dst = hex_hash;
	for (i = 0; i < BINARY_SIZE; ++i) {
		v = *ptr_src++;
		*ptr_dst++ = itoa16_shr_04[ARCH_INDEX(v)];
		*ptr_dst++ = itoa16_and_0f[ARCH_INDEX(v)];
	}
	*ptr_dst = 0;

	snprintf((char*)cs.prehash_KD, sizeof(cs.prehash_KD),
	         ":%s:%s:%s:%s:%s", nonce, nc, cnonce, qop, hex_hash);
	cs.prehash_KD_len = strlen((char*)cs.prehash_KD);

	if (authzid[0])
		snprintf((char*)cs.nonces, sizeof(cs.nonces),
		         ":%s:%s:%s", nonce, cnonce, authzid);
	else
		snprintf((char*)cs.nonces, sizeof(cs.nonces),
		         ":%s:%s", nonce, cnonce);
	cs.nonces_len = strlen((char*)cs.nonces);

	snprintf((char*)cs.login_id, sizeof(cs.login_id),
	         "%s:%s:", username, realm);
	cs.login_id_len = strlen((char*)cs.login_id);

	MEM_FREE(ccopy);
	return (void*)&cs;
}
/* Select the active salt for the next crypt_all() batch. */
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
/* Store one candidate password (NUL-terminated, truncated to PLAINTEXT_LENGTH). */
static void set_key(char *key, int index)
{
strnzcpyn(saved_key[index], key, PLAINTEXT_LENGTH + 1);
}
/* Return the stored candidate password for the given index. */
static char *get_key(int index)
{
return saved_key[index];
}
/*
 * Compute the DIGEST-MD5 response for every queued candidate key:
 * KD = MD5(hex(MD5(hex(MD5(user:realm:pw)) nonces)) prehash_KD),
 * where the constant pieces were prepared in get_salt().
 * Note the #ifdef: with OpenMP the braced body is the loop body; without
 * it, the body runs once with index == 0 (MAX_KEYS_PER_CRYPT is 1).
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
unsigned char hash[16];
unsigned char hex_hash[MD5_HEX_SIZE];
unsigned char *ptr_src, *ptr_dst;
MD5_CTX ctx;
int i;
MD5_Init(&ctx);
// "username:realm"
MD5_Update(&ctx, cur_salt->login_id, cur_salt->login_id_len);
// "password"
MD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));
MD5_Final(hash, &ctx);
MD5_Init(&ctx);
// previous result
MD5_Update(&ctx, hash, BINARY_SIZE);
// ":nonce:cnonce[:authzid]"
MD5_Update(&ctx, cur_salt->nonces, cur_salt->nonces_len);
MD5_Final(hash, &ctx);
// hexify
ptr_src = hash;
ptr_dst = hex_hash;
for (i = 0; i < BINARY_SIZE; ++i) {
unsigned char v = *ptr_src++;
*ptr_dst++ = itoa16_shr_04[ARCH_INDEX(v)];
*ptr_dst++ = itoa16_and_0f[ARCH_INDEX(v)];
}
MD5_Init(&ctx);
// previous result, in hex
MD5_Update(&ctx, hex_hash, MD5_HEX_SIZE);
// ":nonce:nc:cnonce:qop:hex_A2_hash"
MD5_Update(&ctx, cur_salt->prehash_KD, cur_salt->prehash_KD_len);
MD5_Final((unsigned char*)crypt_key[index], &ctx);
}
return count;
}
/* Fast scan: does any computed digest match the first 32 bits of the
 * target binary? cmp_one() later confirms all 128 bits. */
static int cmp_all(void *binary, int count)
{
#if defined(_OPENMP) || (MAX_KEYS_PER_CRYPT > 1)
int index;
uint32_t b = ((uint32_t*)binary)[0];
for (index = 0; index < count; index++)
if (crypt_key[index][0] == b)
return 1;
return 0;
#else
return ((uint32_t*)binary)[0] == crypt_key[0][0];
#endif
}
/* Full 128-bit comparison of one computed digest against the target. */
static int cmp_one(void *binary, int index)
{
	return memcmp(crypt_key[index], binary, BINARY_SIZE) == 0;
}
/* cmp_one() already compared all BINARY_SIZE bytes, so nothing more to check. */
static int cmp_exact(char *source, int index)
{
return 1;
}
#define COMMON_GET_HASH_VAR crypt_key
#include "common-get-hash.h"
/* Format descriptor registered with John the Ripper: the first inner
 * struct is fmt_params (sizes, flags, self-tests), the second is
 * fmt_methods wiring the functions defined above. */
struct fmt_main fmt_DMD5 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
{ FORMAT_TAG },
tests
},
{
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
softmax_layer.c | #include "softmax_layer.h"
#include "blas.h"
#include "dark_cuda.h"
#include "utils.h"
#include "blas.h"
#include <float.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#define SECRET_NUM -1234
// Apply softmax independently over each group of the class-hierarchy tree
// for every batch item; input/output are laid out [batch][inputs].
void softmax_tree(float *input, int batch, int inputs, float temp, tree *hierarchy, float *output)
{
    int b;
    for (b = 0; b < batch; ++b) {
        int g;
        int offset = 0;
        for (g = 0; g < hierarchy->groups; ++g) {
            const int gsz = hierarchy->group_size[g];
            softmax(input + b*inputs + offset, gsz, temp, output + b*inputs + offset, 1);
            offset += gsz;
        }
    }
}
// Build a softmax layer normalizing `inputs` values per batch item,
// optionally split into `groups` independent chunks (must divide evenly).
softmax_layer make_softmax_layer(int batch, int inputs, int groups)
{
    assert(inputs % groups == 0);
    fprintf(stderr, "softmax %4d\n", inputs);

    softmax_layer layer = { (LAYER_TYPE)0 };
    layer.type = SOFTMAX;
    layer.batch = batch;
    layer.groups = groups;
    layer.inputs = inputs;
    layer.outputs = inputs;

    const int total = inputs * batch;
    layer.loss = (float*)xcalloc(total, sizeof(float));
    layer.output = (float*)xcalloc(total, sizeof(float));
    layer.delta = (float*)xcalloc(total, sizeof(float));
    layer.cost = (float*)xcalloc(1, sizeof(float));

    layer.forward = forward_softmax_layer;
    layer.backward = backward_softmax_layer;
#ifdef GPU
    layer.forward_gpu = forward_softmax_layer_gpu;
    layer.backward_gpu = backward_softmax_layer_gpu;
    layer.output_gpu = cuda_make_array(layer.output, total);
    layer.loss_gpu = cuda_make_array(layer.loss, total);
    layer.delta_gpu = cuda_make_array(layer.delta, total);
#endif
    return layer;
}
// CPU forward pass: per-group softmax over a class-hierarchy tree when
// present, otherwise a flat (possibly grouped) softmax; then optional
// cross-entropy loss against net.truth.
void forward_softmax_layer(const softmax_layer l, network_state net)
{
    if (l.softmax_tree) {
        int g;
        int offset = 0;
        for (g = 0; g < l.softmax_tree->groups; ++g) {
            const int group_size = l.softmax_tree->group_size[g];
            softmax_cpu(net.input + offset, group_size, l.batch, l.inputs, 1, 0, 1, l.temperature, l.output + offset);
            offset += group_size;
        }
    } else {
        softmax_cpu(net.input, l.inputs/l.groups, l.batch, l.inputs, l.groups, l.inputs/l.groups, 1, l.temperature, l.output);
    }

    // With ground truth available, also fill delta/loss and total cost.
    if (net.truth && !l.noloss) {
        softmax_x_ent_cpu(l.batch*l.inputs, l.output, net.truth, l.delta, l.loss);
        l.cost[0] = sum_array(l.loss, l.batch*l.inputs);
    }
}
/* CPU backward pass: accumulate this layer's gradient into the upstream
 * buffer (net.delta += 1 * l.delta). */
void backward_softmax_layer(const softmax_layer l, network_state net)
{
axpy_cpu(l.inputs*l.batch, 1, l.delta, 1, net.delta, 1);
}
#ifdef GPU
/* Copy the layer's device-side output back into the host output buffer. */
void pull_softmax_layer_output(const softmax_layer layer)
{
cuda_pull_array(layer.output_gpu, layer.output, layer.inputs*layer.batch);
}
/* GPU forward pass: tree softmax, spatial softmax, or grouped softmax;
 * then, when truth is present, cross-entropy loss with optional tree
 * masking, pulled back to the host to accumulate l.cost. */
void forward_softmax_layer_gpu(const softmax_layer l, network_state net)
{
if(l.softmax_tree){
softmax_tree_gpu(net.input, 1, l.batch, l.inputs, l.temperature, l.output_gpu, *l.softmax_tree);
/*
int i;
int count = 0;
for (i = 0; i < l.softmax_tree->groups; ++i) {
int group_size = l.softmax_tree->group_size[i];
softmax_gpu(net.input_gpu + count, group_size, l.batch, l.inputs, 1, 0, 1, l.temperature, l.output_gpu + count);
count += group_size;
}
*/
} else {
if(l.spatial){
// spatial mode: softmax across channels at each of the w*h positions
softmax_gpu_new_api(net.input, l.c, l.batch*l.c, l.inputs/l.c, l.w*l.h, 1, l.w*l.h, 1, l.output_gpu);
}else{
softmax_gpu_new_api(net.input, l.inputs/l.groups, l.batch, l.inputs, l.groups, l.inputs/l.groups, 1, l.temperature, l.output_gpu);
}
}
if(net.truth && !l.noloss){
softmax_x_ent_gpu(l.batch*l.inputs, l.output_gpu, net.truth, l.delta_gpu, l.loss_gpu);
if(l.softmax_tree){
// zero delta/loss wherever truth equals the SECRET_NUM sentinel
mask_gpu_new_api(l.batch*l.inputs, l.delta_gpu, SECRET_NUM, net.truth, 0);
mask_gpu_new_api(l.batch*l.inputs, l.loss_gpu, SECRET_NUM, net.truth, 0);
}
cuda_pull_array(l.loss_gpu, l.loss, l.batch*l.inputs);
l.cost[0] = sum_array(l.loss, l.batch*l.inputs);
}
}
/* GPU backward pass: state.delta += loss_scale * layer.delta_gpu. */
void backward_softmax_layer_gpu(const softmax_layer layer, network_state state)
{
axpy_ongpu(layer.batch*layer.inputs, state.net.loss_scale, layer.delta_gpu, 1, state.delta, 1);
}
#endif
// -------------------------------------
// Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf
/*
 * Build a supervised contrastive-loss layer (SupCon, arXiv:2004.11362).
 * If yolo_layer is non-NULL the layer runs in detection mode, sharing
 * that [yolo] layer's labels/class ids/anchor count; otherwise it is a
 * plain classification contrastive layer with one embedding per item.
 */
contrastive_layer make_contrastive_layer(int batch, int w, int h, int c, int classes, int inputs, layer *yolo_layer)
{
	contrastive_layer l = { (LAYER_TYPE)0 };
	l.type = CONTRASTIVE;
	l.batch = batch;
	l.inputs = inputs;
	l.w = w;
	l.h = h;
	l.c = c;
	l.temperature = 1;

	l.max_boxes = 0;
	if (yolo_layer) {
		l.detection = 1;
		l.max_boxes = yolo_layer->max_boxes;
		l.labels = yolo_layer->labels; // track id
		l.class_ids = yolo_layer->class_ids; // class_ids
		l.n = yolo_layer->n; // num of embeddings per cell = num of anchors
		l.classes = yolo_layer->classes;// num of classes
		classes = l.classes;
		l.embedding_size = l.inputs / (l.n*l.h*l.w);
		l.truths = yolo_layer->truths;
		if (l.embedding_size != yolo_layer->embedding_size) {
			printf(" Error: [contrastive] embedding_size=%d isn't equal to [yolo] embedding_size=%d. They should use the same [convolutional] layer \n", l.embedding_size, yolo_layer->embedding_size);
			getchar();
			exit(0);
		}
		if (l.inputs % (l.n*l.h*l.w) != 0) {
			printf(" Warning: filters= number in the previous (embedding) layer isn't divisable by number of anchors %d \n", l.n);
			getchar();
		}
	}
	else {
		l.detection = 0;
		l.labels = (int*)xcalloc(l.batch, sizeof(int)); // labels
		l.n = 1; // num of embeddings per cell
		l.classes = classes; // num of classes
		l.embedding_size = l.c;
	}
	l.outputs = inputs;

	l.loss = (float*)xcalloc(1, sizeof(float));
	l.output = (float*)xcalloc(inputs * batch, sizeof(float));
	l.delta = (float*)xcalloc(inputs * batch, sizeof(float));
	l.cost = (float*)xcalloc(1, sizeof(float));

	const size_t step = l.batch*l.n*l.h*l.w;
	l.cos_sim = NULL;
	l.exp_cos_sim = NULL;
	l.p_constrastive = NULL;
	if (!l.detection) {
		/* Classification mode keeps full step x step similarity tables. */
		l.cos_sim = (float*)xcalloc(step*step, sizeof(float));
		l.exp_cos_sim = (float*)xcalloc(step*step, sizeof(float));
		l.p_constrastive = (float*)xcalloc(step*step, sizeof(float));
	}
	//l.p_constrastive = (float*)xcalloc(step*step, sizeof(float));
	//l.contrast_p_size = (int*)xcalloc(1, sizeof(int));
	//*l.contrast_p_size = step;
	//l.contrast_p = (contrastive_params*)xcalloc(*l.contrast_p_size, sizeof(contrastive_params));

	l.forward = forward_contrastive_layer;
	l.backward = backward_contrastive_layer;
#ifdef GPU
	l.forward_gpu = forward_contrastive_layer_gpu;
	l.backward_gpu = backward_contrastive_layer_gpu;
	l.output_gpu = cuda_make_array(l.output, inputs*batch);
	l.delta_gpu = cuda_make_array(l.delta, inputs*batch);
	/* NOTE(review): this int can overflow for large max_boxes*batch —
	   TODO confirm expected ranges before widening to size_t. */
	const int max_contr_size = (l.max_boxes*l.batch)*(l.max_boxes*l.batch) * sizeof(contrastive_params)/4;
	printf(" max_contr_size = %d MB \n", max_contr_size / (1024*1024));
	l.contrast_p_gpu = (contrastive_params *)cuda_make_array(NULL, max_contr_size);
#endif
	/* Fix: step is size_t but the format uses %4d — cast to int to avoid
	   undefined behavior from a mismatched fprintf conversion. */
	fprintf(stderr, "contrastive %4d x%4d x%4d x emb_size %4d x batch: %4d classes = %4d, step = %4d \n", w, h, l.n, l.embedding_size, batch, l.classes, (int)step);
	if(l.detection) fprintf(stderr, "detection \n");
	return l;
}
// Clamp val to the symmetric interval [-max_val, max_val].
// NaN inputs fall through both comparisons and are returned unchanged.
static inline float clip_value(float val, const float max_val)
{
    if (val > max_val) {
        //printf("\n val = %f > max_val = %f \n", val, max_val);
        return max_val;
    }
    if (val < -max_val) {
        //printf("\n val = %f < -max_val = %f \n", val, -max_val);
        return -max_val;
    }
    return val;
}
// Forward pass of the contrastive layer (training only).
// Computes pairwise cosine similarities between per-cell embeddings,
// derives the contrastive probabilities P, accumulates gradients into
// l.delta, and reports a "contrast accuracy" into *l.loss and the squared
// delta magnitude into *l.cost.
void forward_contrastive_layer(contrastive_layer l, network_state state)
{
if (!state.train) return;
const float truth_thresh = state.net.label_smooth_eps;
// steps > 1 only for recurrent nets; pairs are only compared within the
// same time step below.
const int mini_batch = l.batch / l.steps;
int b, n, w, h;
fill_cpu(l.batch*l.inputs, 0, l.delta, 1);
// Classifier mode: derive per-sample labels. Samples are paired
// (original + augmentation), hence b/2; in adversarial mode the pairing
// alternates (b % 2).
if (!l.detection) {
for (b = 0; b < l.batch; ++b) {
if (state.net.adversarial) l.labels[b] = b % 2;
else l.labels[b] = b / 2;
}
// set labels
for (b = 0; b < l.batch; ++b) {
for (h = 0; h < l.h; ++h) {
for (w = 0; w < l.w; ++w)
{
// find truth with max prob (only 1 label even if mosaic is used)
float max_truth = 0;
int n;
for (n = 0; n < l.classes; ++n) {
const float truth_prob = state.truth[b*l.classes + n];
//printf(" truth_prob = %f, ", truth_prob);
//if (truth_prob > max_truth)
if (truth_prob > truth_thresh)
{
//printf(" truth_prob = %f, max_truth = %f, n = %d; ", truth_prob, max_truth, n);
max_truth = truth_prob;
l.labels[b] = n;
}
}
//printf(", l.labels[b] = %d ", l.labels[b]);
}
}
}
}
//printf("\n\n");
// set pointers to features
// z[i] holds a freshly allocated copy of the embedding for grid cell i;
// entries stay NULL (from xcalloc) where the label is negative.
float **z = (float**)xcalloc(l.batch*l.n*l.h*l.w, sizeof(float*));
for (b = 0; b < l.batch; ++b) {
for (n = 0; n < l.n; ++n) {
for (h = 0; h < l.h; ++h) {
for (w = 0; w < l.w; ++w)
{
const int z_index = b*l.n*l.h*l.w + n*l.h*l.w + h*l.w + w;
if (l.labels[z_index] < 0) continue;
//const int input_index = b*l.inputs + n*l.embedding_size*l.h*l.w + h*l.w + w;
//float *ptr = state.input + input_index;
//z[z_index] = ptr;
z[z_index] = (float*)xcalloc(l.embedding_size, sizeof(float));
get_embedding(state.input, l.w, l.h, l.c, l.embedding_size, w, h, n, b, z[z_index]);
}
}
}
}
int b2, n2, h2, w2;
int contrast_p_index = 0;
const size_t step = l.batch*l.n*l.h*l.w;
// contrast_p is grown on demand via xrealloc below.
size_t contrast_p_size = step;
if (!l.detection) contrast_p_size = l.batch*l.batch;
contrastive_params *contrast_p = (contrastive_params*)xcalloc(contrast_p_size, sizeof(contrastive_params));
// Track, per cell, the best similarity to a same-label and to a
// different-label cell. -10 marks "never updated" (cosine sim is in [-1,1]).
float *max_sim_same = (float *)xcalloc(l.batch*l.inputs, sizeof(float));
float *max_sim_diff = (float *)xcalloc(l.batch*l.inputs, sizeof(float));
fill_cpu(l.batch*l.inputs, -10, max_sim_same, 1);
fill_cpu(l.batch*l.inputs, -10, max_sim_diff, 1);
// precalculate cosine similiraty
for (b = 0; b < l.batch; ++b) {
for (n = 0; n < l.n; ++n) {
for (h = 0; h < l.h; ++h) {
for (w = 0; w < l.w; ++w)
{
const int z_index = b*l.n*l.h*l.w + n*l.h*l.w + h*l.w + w;
if (l.labels[z_index] < 0) continue;
for (b2 = 0; b2 < l.batch; ++b2) {
for (n2 = 0; n2 < l.n; ++n2) {
for (h2 = 0; h2 < l.h; ++h2) {
for (w2 = 0; w2 < l.w; ++w2)
{
const int z_index2 = b2*l.n*l.h*l.w + n2*l.h*l.w + h2*l.w + w2;
if (l.labels[z_index2] < 0) continue;
if (z_index == z_index2) continue;
if (l.detection)
if (l.class_ids[z_index] != l.class_ids[z_index2]) continue;
// Only compare embeddings belonging to the same time step.
const int time_step_i = b / mini_batch;
const int time_step_j = b2 / mini_batch;
if (time_step_i != time_step_j) continue;
const size_t step = l.batch*l.n*l.h*l.w;
const float sim = cosine_similarity(z[z_index], z[z_index2], l.embedding_size);
const float exp_sim = expf(sim / l.temperature);
if (!l.detection) {
l.cos_sim[z_index*step + z_index2] = sim;
l.exp_cos_sim[z_index*step + z_index2] = exp_sim;
}
// calc good sim
if (l.labels[z_index] == l.labels[z_index2] && max_sim_same[z_index] < sim) max_sim_same[z_index] = sim;
if (l.labels[z_index] != l.labels[z_index2] && max_sim_diff[z_index] < sim) max_sim_diff[z_index] = sim;
//printf(" z_i = %d, z_i2 = %d, l = %d, l2 = %d, sim = %f \n", z_index, z_index2, l.labels[z_index], l.labels[z_index2], sim);
contrast_p[contrast_p_index].sim = sim;
contrast_p[contrast_p_index].exp_sim = exp_sim;
contrast_p[contrast_p_index].i = z_index;
contrast_p[contrast_p_index].j = z_index2;
contrast_p[contrast_p_index].time_step_i = time_step_i;
contrast_p[contrast_p_index].time_step_j = time_step_j;
contrast_p_index++;
//printf(" contrast_p_index = %d, contrast_p_size = %d \n", contrast_p_index, contrast_p_size);
// Grow the pair list when full (one element of headroom kept).
if ((contrast_p_index+1) >= contrast_p_size) {
contrast_p_size = contrast_p_index + 1;
//printf(" contrast_p_size = %d, z_index = %d, z_index2 = %d \n", contrast_p_size, z_index, z_index2);
contrast_p = (contrastive_params*)xrealloc(contrast_p, contrast_p_size * sizeof(contrastive_params));
}
// Sanity check: cosine similarity must stay in [-1, 1] (small tolerance).
if (sim > 1.001 || sim < -1.001) {
printf(" sim = %f, ", sim); getchar();
}
}
}
}
}
}
}
}
}
// calc contrastive accuracy
// A cell counts as "good" when its best same-label similarity beats its
// best different-label similarity.
int i;
int good_sims = 0, all_sims = 0, same_sim = 0, diff_sim = 0;
for (i = 0; i < l.batch*l.inputs; ++i) {
// NOTE(review): the inner two ifs are implied by the outer condition, so
// same_sim/diff_sim always equal all_sims here — looks redundant; confirm
// whether the outer condition was meant to be '||'.
if (max_sim_same[i] >= -1 && max_sim_diff[i] >= -1) {
if (max_sim_same[i] >= -1) same_sim++;
if (max_sim_diff[i] >= -1) diff_sim++;
++all_sims;
//printf(" max_sim_diff[i] = %f, max_sim_same[i] = %f \n", max_sim_diff[i], max_sim_same[i]);
if (max_sim_diff[i] < max_sim_same[i]) good_sims++;
}
}
if (all_sims > 0) {
// NOTE(review): integer division before the float assignment — the
// accuracy is truncated to a whole percent. Presumably intentional.
*l.loss = 100 * good_sims / all_sims;
}
else *l.loss = -1;
printf(" Contrast accuracy = %f %%, all = %d, good = %d, same = %d, diff = %d \n", *l.loss, all_sims, good_sims, same_sim, diff_sim);
free(max_sim_same);
free(max_sim_diff);
/*
// show near sim
float good_contrast = 0;
for (b = 0; b < l.batch; b += 2) {
float same = l.cos_sim[b*l.batch + b];
float aug = l.cos_sim[b*l.batch + b + 1];
float diff = l.cos_sim[b*l.batch + b + 2];
good_contrast += (aug > diff);
//printf(" l.labels[b] = %d, l.labels[b+1] = %d, l.labels[b+2] = %d, b = %d \n", l.labels[b], l.labels[b + 1], l.labels[b + 2], b);
//printf(" same = %f, aug = %f, diff = %f, (aug > diff) = %d \n", same, aug, diff, (aug > diff));
}
*l.loss = 100 * good_contrast / (l.batch / 2);
printf(" Contrast accuracy = %f %% \n", *l.loss);
*/
/*
// precalculate P_contrastive
for (b = 0; b < l.batch; ++b) {
int b2;
for (b2 = 0; b2 < l.batch; ++b2) {
if (b != b2) {
const float P = P_constrastive(b, b2, l.labels, l.batch, z, l.embedding_size, l.temperature, l.cos_sim);
l.p_constrastive[b*l.batch + b2] = P;
if (P > 1 || P < -1) {
printf(" p = %f, ", P); getchar();
}
}
}
}
*/
const size_t contr_size = contrast_p_index;
if (l.detection) {
#ifdef GPU
const int max_contr_size = (l.max_boxes*l.batch)*(l.max_boxes*l.batch);
// NOTE(review): contr_size is size_t but printed with %d — mismatched
// format specifier; should be %zu.
if (max_contr_size < contr_size) {
printf(" Error: too large number of bboxes: contr_size = %d > max_contr_size = %d \n", contr_size, max_contr_size);
exit(0);
}
int *labels = NULL;
// Compute P for all pairs on the GPU; the /4 converts byte count to
// float count for the cuda_push/pull helpers.
if (contr_size > 2) {
cuda_push_array((float *)l.contrast_p_gpu, (float *)contrast_p, contr_size * sizeof(contrastive_params) / 4);
P_constrastive_f_det_gpu(labels, l.embedding_size, l.temperature, l.contrast_p_gpu, contr_size);
cuda_pull_array((float *)l.contrast_p_gpu, (float *)contrast_p, contr_size * sizeof(contrastive_params) / 4);
}
#else // GPU
int k;
//#pragma omp parallel for
for (k = 0; k < contr_size; ++k) {
contrast_p[k].P = P_constrastive_f_det(k, l.labels, z, l.embedding_size, l.temperature, contrast_p, contr_size);
}
#endif // GPU
}
else {
// precalculate P-contrastive
for (b = 0; b < l.batch; ++b) {
for (n = 0; n < l.n; ++n) {
for (h = 0; h < l.h; ++h) {
for (w = 0; w < l.w; ++w)
{
const int z_index = b*l.n*l.h*l.w + n*l.h*l.w + h*l.w + w;
if (l.labels[z_index] < 0) continue;
for (b2 = 0; b2 < l.batch; ++b2) {
for (n2 = 0; n2 < l.n; ++n2) {
for (h2 = 0; h2 < l.h; ++h2) {
for (w2 = 0; w2 < l.w; ++w2)
{
const int z_index2 = b2*l.n*l.h*l.w + n2*l.h*l.w + h2*l.w + w2;
if (l.labels[z_index2] < 0) continue;
if (z_index == z_index2) continue;
if (l.detection)
if (l.class_ids[z_index] != l.class_ids[z_index2]) continue;
const int time_step_i = b / mini_batch;
const int time_step_j = b2 / mini_batch;
if (time_step_i != time_step_j) continue;
const size_t step = l.batch*l.n*l.h*l.w;
float P = -10;
if (l.detection) {
P = P_constrastive_f(z_index, z_index2, l.labels, z, l.embedding_size, l.temperature, contrast_p, contr_size);
}
else {
P = P_constrastive(z_index, z_index2, l.labels, step, z, l.embedding_size, l.temperature, l.cos_sim, l.exp_cos_sim);
l.p_constrastive[z_index*step + z_index2] = P;
}
// Copy P back into the matching entry of the pair list
// (linear search over contrast_p).
int q;
for (q = 0; q < contr_size; ++q)
if (contrast_p[q].i == z_index && contrast_p[q].j == z_index2) {
contrast_p[q].P = P;
break;
}
//if (q == contr_size) getchar();
//if (P > 1 || P < -1) {
// printf(" p = %f, z_index = %d, z_index2 = %d ", P, z_index, z_index2); getchar();
//}
}
}
}
}
}
}
}
}
}
// calc deltas
// Each iteration writes only delta entries for its own bd, so the
// parallel loop has no write conflicts across threads.
int bd = 0;
#pragma omp parallel for
for (bd = 0; bd < l.batch; ++bd) {
for (int nd = 0; nd < l.n; ++nd) {
for (int hd = 0; hd < l.h; ++hd) {
for (int wd = 0; wd < l.w; ++wd)
{
const int z_index = bd*l.n*l.h*l.w + nd*l.h*l.w + hd*l.w + wd;
const size_t step = l.batch*l.n*l.h*l.w;
if (l.labels[z_index] < 0) continue;
const int delta_index = bd*l.embedding_size*l.n*l.h*l.w + nd*l.embedding_size*l.h*l.w + hd*l.w + wd;
const int wh = l.w*l.h;
if (l.detection) {
// detector
// positive
grad_contrastive_loss_positive_f(z_index, l.class_ids, l.labels, step, z, l.embedding_size, l.temperature, l.delta + delta_index, wh, contrast_p, contr_size);
// negative
grad_contrastive_loss_negative_f(z_index, l.class_ids, l.labels, step, z, l.embedding_size, l.temperature, l.delta + delta_index, wh, contrast_p, contr_size, l.contrastive_neg_max);
}
else {
// classifier
// positive
grad_contrastive_loss_positive(z_index, l.labels, step, z, l.embedding_size, l.temperature, l.cos_sim, l.p_constrastive, l.delta + delta_index, wh);
// negative
grad_contrastive_loss_negative(z_index, l.labels, step, z, l.embedding_size, l.temperature, l.cos_sim, l.p_constrastive, l.delta + delta_index, wh);
}
}
}
}
}
// Scale gradients, clip them, and report the squared delta magnitude as cost.
scal_cpu(l.inputs * l.batch, l.cls_normalizer, l.delta, 1);
for (i = 0; i < l.inputs * l.batch; ++i) {
l.delta[i] = clip_value(l.delta[i], l.max_delta);
}
*(l.cost) = pow(mag_array(l.delta, l.inputs * l.batch), 2);
if (state.net.adversarial) {
printf(" adversarial contrastive loss = %f \n\n", *(l.cost));
}
else {
printf(" contrastive loss = %f \n\n", *(l.cost));
}
// Release the per-cell embedding copies (NULL entries skipped) and the
// scratch pair list.
for (b = 0; b < l.batch; ++b) {
for (n = 0; n < l.n; ++n) {
for (h = 0; h < l.h; ++h) {
for (w = 0; w < l.w; ++w)
{
const int z_index = b*l.n*l.h*l.w + n*l.h*l.w + h*l.w + w;
//if (l.labels[z_index] < 0) continue;
if (z[z_index]) free(z[z_index]);
}
}
}
}
free(contrast_p);
free(z);
}
// Backward pass: accumulate this layer's gradient into the previous
// layer's delta (delta_prev += 1 * l.delta).
void backward_contrastive_layer(contrastive_layer l, network_state state)
{
    const int count = l.inputs * l.batch;
    axpy_cpu(count, 1, l.delta, 1, state.delta, 1);
}
#ifdef GPU
// Copy the layer output from device (GPU) memory into the host-side buffer.
void pull_contrastive_layer_output(const contrastive_layer l)
{
    const int count = l.inputs * l.batch;
    cuda_pull_array(l.output_gpu, l.output, count);
}
// NOTE(review): despite the "_output" name, this copies l.delta to
// l.delta_gpu — not l.output to l.output_gpu. Looks like a copy/paste
// artifact from another layer; confirm against callers before changing,
// since they may rely on the delta push.
void push_contrastive_layer_output(const contrastive_layer l)
{
cuda_push_array(l.delta_gpu, l.delta, l.inputs*l.batch);
}
// GPU forward pass: the actual contrastive computation runs on the CPU.
// Copies input (and truth, if present) from the GPU, runs
// forward_contrastive_layer on host buffers, then pushes the resulting
// delta back to the GPU.
void forward_contrastive_layer_gpu(contrastive_layer l, network_state state)
{
simple_copy_ongpu(l.batch*l.inputs, state.input, l.output_gpu);
if (!state.train) return;
float *in_cpu = (float *)xcalloc(l.batch*l.inputs, sizeof(float));
cuda_pull_array(l.output_gpu, l.output, l.batch*l.outputs);
memcpy(in_cpu, l.output, l.batch*l.outputs * sizeof(float));
float *truth_cpu = 0;
if (state.truth) {
// Detection mode stores box truths; classifier mode stores one-hot class rows.
int num_truth = l.batch*l.classes;
if (l.detection) num_truth = l.batch*l.truths;
truth_cpu = (float *)xcalloc(num_truth, sizeof(float));
cuda_pull_array(state.truth, truth_cpu, num_truth);
}
// Build a host-side state that aliases everything except input/truth,
// which point at the freshly pulled CPU copies.
network_state cpu_state = state;
cpu_state.net = state.net;
cpu_state.index = state.index;
cpu_state.train = state.train;
cpu_state.truth = truth_cpu;
cpu_state.input = in_cpu;
forward_contrastive_layer(l, cpu_state);
// Upload the gradients computed on the CPU.
cuda_push_array(l.delta_gpu, l.delta, l.batch*l.outputs);
free(in_cpu);
if (cpu_state.truth) free(cpu_state.truth);
}
// GPU backward pass: delta_prev += loss_scale * layer.delta (on device).
void backward_contrastive_layer_gpu(contrastive_layer layer, network_state state)
{
    const int count = layer.batch * layer.inputs;
    axpy_ongpu(count, state.net.loss_scale, layer.delta_gpu, 1, state.delta, 1);
}
#endif |
GB_binop__bor_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bor_int8)
// A.*B function (eWiseMult): GB (_AemultB_01__bor_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__bor_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__bor_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_int8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bor_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__bor_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_int8)
// C=scalar+B GB (_bind1st__bor_int8)
// C=scalar+B' GB (_bind1st_tran__bor_int8)
// C=A+scalar GB (_bind2nd__bor_int8)
// C=A'+scalar GB (_bind2nd_tran__bor_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij) | (bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) | (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BOR || GxB_NO_INT8 || GxB_NO_BOR_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A|B where C, A, and B are all dense; the actual loop lives in the
// included template, driven by the GB_* macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__bor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse: accumulate B's entries into C
// with the BOR operator. Work is partitioned by the precomputed B_ek_slicing.
GrB_Info GB (_Cdense_accumB__bor_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into every entry of the dense matrix C.
GrB_Info GB (_Cdense_accumb__bor_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the braced block above already returns.
// Harmless generator artifact.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the BOR operator: the result pattern
// is the union of A and B. The template uses the workspaces declared here.
GrB_Info GB (_AaddB__bor_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the BOR operator: the result
// pattern is the intersection of A and B (method 01).
GrB_Info GB (_AemultB_01__bor_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. GB_BINOP_FLIP is 0 for BOR (commutative), so only the
// non-flipped branch below is compiled in.
GrB_Info GB (_AemultB_02__bor_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 03: C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full; iterates over M's pattern.
GrB_Info GB (_AemultB_03__bor_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
GrB_Info GB (_AemultB_bitmap__bor_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x | Bx [p] for every entry present in B (bind the scalar as the
// first operand). Cx and Bx may alias; Bb is B's bitmap (NULL if B is full).
GrB_Info GB (_bind1st__bor_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Bx = (int8_t *) Bx_input ;
    const int8_t x = (*((int8_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only operate on entries present in B's bitmap
        if (GBB (Bb, p))
        {
            int8_t t = GBX (Bx, p, false) ;
            Cx [p] = (x) | (t) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] | y for every entry present in A (bind the scalar as the
// second operand). Cx and Ax may alias; Ab is A's bitmap (NULL if A is full).
GrB_Info GB (_bind2nd__bor_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    const int8_t y = (*((int8_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only operate on entries present in A's bitmap
        if (GBB (Ab, p))
        {
            int8_t t = GBX (Ax, p, false) ;
            Cx [p] = (t) | (y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) | (aij) ; \
}
// C = x | A': transpose A and apply the BOR operator with scalar x bound as
// the first operand. The per-entry work is GB_CAST_OP, defined just above.
GrB_Info GB (_bind1st_tran__bor_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij) | (y) ; \
}
// C = A' | y: transpose A and apply the BOR operator with scalar y bound as
// the second operand. The per-entry work is GB_CAST_OP, defined just above.
GrB_Info GB (_bind2nd_tran__bor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
expression-inl.h | #ifndef FUNDAMENTAL_FUNDAMENTAL_INL_H
#define FUNDAMENTAL_FUNDAMENTAL_INL_H
#include <fundamental/expression.h>
#include <fundamental/tensor.h>
#include <functional>
namespace expression{
// Element-wise addition: builds a lazy Binary expression node tagged with the
// 'plus' primitive. No computation happens here; evaluation is deferred.
template<typename xpu, index_t stream_id, typename EL, typename ER,
typename T, int tl, int tr>
TENSOR_INLINE_HOST Binary<xpu, stream_id, typename primitives::Primitives<xpu>::plus,
EL, ER, T>
operator+(const Exp<xpu, stream_id, EL, T, tl> &el,
const Exp<xpu, stream_id, ER, T, tr> &er){
return MakeExp<xpu, stream_id, typename primitives::Primitives<xpu>::plus>(el, er);
}
// Element-wise subtraction: builds a lazy Binary expression node tagged with
// the 'minus' primitive. No computation happens here; evaluation is deferred.
template<typename xpu, index_t stream_id, typename EL, typename ER,
typename T, int tl, int tr>
TENSOR_INLINE_HOST Binary<xpu, stream_id, typename primitives::Primitives<xpu>::minus,
EL, ER, T>
operator-(const Exp<xpu, stream_id, EL, T, tl> &el,
const Exp<xpu, stream_id, ER, T, tr> &er){
return MakeExp<xpu, stream_id, typename primitives::Primitives<xpu>::minus>(el, er);
}
// Element-wise multiplication: builds a lazy Binary expression node tagged
// with the 'mul' primitive. No computation happens here; evaluation is deferred.
template<typename xpu, index_t stream_id, typename EL, typename ER,
typename T, int tl, int tr>
TENSOR_INLINE_HOST Binary<xpu, stream_id, typename primitives::Primitives<xpu>::mul,
EL, ER, T>
operator*(const Exp<xpu, stream_id, EL, T, tl> &el,
const Exp<xpu, stream_id, ER, T, tr> &er){
return MakeExp<xpu, stream_id, typename primitives::Primitives<xpu>::mul>(el, er);
}
// Element-wise division: builds a lazy Binary expression node tagged with the
// 'div' primitive. No computation happens here; evaluation is deferred.
template<typename xpu, index_t stream_id, typename EL, typename ER,
typename T, int tl, int tr>
TENSOR_INLINE_HOST Binary<xpu, stream_id, typename primitives::Primitives<xpu>::div,
EL, ER, T>
operator/(const Exp<xpu, stream_id, EL, T, tl> &el,
const Exp<xpu, stream_id, ER, T, tr> &er) {
return MakeExp<xpu, stream_id, typename primitives::Primitives<xpu>::div>(el, er);
}
// Materialize the expression: allocate a tensor with the expression's shape
// and execute the expression tree into it. Returns the tensor by value.
template<typename xpu, index_t stream_id, typename SubType, typename T,
index_t exp_type>
TENSOR_INLINE_HOST auto Exp<xpu, stream_id, SubType, T, exp_type>::eval(){
core::Tensor<xpu, stream_id, T> tensor(this->self().shape());
tensor.allocate();
Exceturer<xpu>::excetute(*this, tensor);
return tensor;
}
// CPU executor, forward direction: enqueue a task on the forward stream that
// evaluates src element-by-element into the l-value destination dst.
// NOTE(review): "Exceturer"/"excetute" look like misspellings of
// Executor/execute — they are part of the public interface, so not renamed here.
template<typename Esrc, typename Edst, index_t stream_id, typename T,
index_t exp_type>
TENSOR_INLINE_HOST void Exceturer<cpu>::excetute(
const Exp<cpu, stream_id, Esrc, T, exp_type>& src_exp,
const Exp<cpu, stream_id, Edst, T, type::kLvalue>& dst_exp){
DEBUG_ASSERT(src_exp.self().shape() == dst_exp.self().shape());
// The task copies the (lightweight) expression objects by value and runs
// asynchronously on the stream; the SIMD hint vectorizes the outer loop body.
auto task = [](Esrc src, Edst dst){
#pragma omp simd
for (int s = 0; s < src.shape().N; ++s){
for (int i = 0; i < src.shape().stride; ++i){
dst.Set(s, i, src.Eval(s, i));
}
}
};
STREAM_FORWARD(cpu, stream_id).put(task, src_exp.self(), dst_exp.self());
}
// CPU executor, backward direction: enqueue a task on the backward stream
// that propagates gradients — dst.Backward receives src.BackwardEval for
// every element.
template<typename Esrc, typename Edst, index_t stream_id, typename T,
index_t exp_typeSrc, index_t exp_typeDst>
TENSOR_INLINE_HOST void Exceturer<cpu>::backward(
const Exp<cpu, stream_id, Esrc, T, exp_typeSrc>& src_exp,
const Exp<cpu, stream_id, Edst, T, exp_typeDst>& dst_exp){
DEBUG_ASSERT(src_exp.self().shape() == dst_exp.self().shape());
auto task = [](Esrc src, Edst dst){
#pragma omp simd
for (int s = 0; s < src.shape().N; ++s){
for (int i = 0; i < src.shape().stride; ++i){
dst.Backward(s, i, src.BackwardEval(s, i));
}
}
};
STREAM_BACKWARD(cpu, stream_id).put(task, src_exp.self(), dst_exp.self());
}
}
#endif
|
allocate.c | /**
*
* @file allocate.c
*
* PLASMA auxiliary routines
* PLASMA is a software package provided by Univ. of Tennessee,
* Univ. of California Berkeley and Univ. of Colorado Denver
*
* @version 2.6.0
* @author Jakub Kurzak
* @date 2010-11-15
*
**/
#include <stdlib.h>
#include "common.h"
/***************************************************************************//**
*
**/
/***************************************************************************//**
 *  Allocate a shared-memory block of size * plasma_element_size(type) bytes.
 *  Under the OmpSs runtime the block is additionally registered with the
 *  runtime. Returns NULL when the element count is zero or allocation fails.
 **/
void *plasma_shared_alloc(plasma_context_t *plasma, size_t size, int type)
{
    void *memptr;

    size *= plasma_element_size(type);
    if (size == 0)  /* size_t is unsigned, so zero is the only invalid value */
        return NULL;
    //if (posix_memalign(&memptr, STANDARD_PAGE_SIZE, size) != 0) {
    if ((memptr = malloc(size)) == NULL) {
        /* Bug fix: the code calls malloc(), not posix_memalign(); the old
         * message reported the wrong failing function. */
        plasma_error("plasma_shared_alloc", "malloc() failed");
        return NULL;
    }
    if ( plasma->runtime == PLASMA_OMPSS) {
#pragma omp register([size]memptr)
        // printf("shared_alloc::memptr: %p[%d]\n", memptr, size);
    }
    return memptr;
}
/***************************************************************************//**
*
**/
/* Release a block obtained from plasma_shared_alloc.
 * NOTE(review): under the OmpSs runtime the block is deliberately NOT
 * freed here — presumably the runtime owns registered memory; confirm
 * that OmpSs releases it, otherwise this leaks. */
void plasma_shared_free(plasma_context_t *plasma, void *ptr)
{
if (ptr == NULL) // somewhat redundant - free() does the same
return;
if ( plasma->runtime != PLASMA_OMPSS) {
free(ptr);
}
}
/***************************************************************************//**
*
**/
/***************************************************************************//**
 *  Allocate a private (per-thread) block of size * plasma_element_size(type)
 *  bytes. Under the OmpSs runtime the block is additionally registered with
 *  the runtime. Returns NULL when the element count is zero or allocation
 *  fails.
 **/
void *plasma_private_alloc(plasma_context_t *plasma, size_t size, int type)
{
    void *memptr;

    size *= plasma_element_size(type);
    if (size == 0)  /* size_t is unsigned, so zero is the only invalid value */
        return NULL;
    //if (posix_memalign(&memptr, CACHE_LINE_SIZE, size) != 0) {
    if ((memptr = malloc(size)) == NULL) {
        /* Bug fix: the code calls malloc(), not posix_memalign(); the old
         * message reported the wrong failing function. */
        plasma_error("plasma_private_alloc", "malloc() failed");
        return NULL;
    }
    if ( plasma->runtime == PLASMA_OMPSS) {
#pragma omp register([size]memptr)
        // printf("private_alloc::memptr: %p[%d]\n", memptr, size);
    }
    return memptr;
}
/***************************************************************************//**
*
**/
/* Release a block obtained from plasma_private_alloc.
 * NOTE(review): under the OmpSs runtime the block is deliberately NOT
 * freed here — presumably the runtime owns registered memory; confirm
 * that OmpSs releases it, otherwise this leaks. */
void plasma_private_free(plasma_context_t *plasma, void *ptr)
{
if (ptr == NULL) // somewhat redundant - free() does the same
return;
if ( plasma->runtime != PLASMA_OMPSS) {
free(ptr);
}
}
|
GB_unaryop__minv_uint8_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint8_uint8
// op(A') function: GB_tran__minv_uint8_uint8
// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 8)
// type of the entries of the input matrix A
#define GB_ATYPE \
    uint8_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

// the p-th entry of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 8) ;

// casting
#define GB_CASTING(z, aij) \
    uint8_t z = (uint8_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_uint8_uint8
(
    uint8_t *Cx,        // Cx and Ax may be aliased
    uint8_t *Ax,
    int64_t anz,        // number of entries in A
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this operator was disabled at compile time (see GB_DISABLE above);
    // the caller falls back to the generic worker
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // Cx [p] = GB_IMINV_UNSIGNED ((uint8_t) Ax [p], 8), for all p
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_uint8_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // per-slice row counts
    GBI_single_iterator Iter,           // iterator over the vectors of A
    const int64_t *GB_RESTRICT A_slice, // partition of A across naslice tasks
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose loop itself lives in the shared template file below;
    // the macros defined earlier in this file specialize it for minv/uint8
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
minhash_sketch.h | #ifndef MHSKETCH_H_
#define MHSKETCH_H_
#pragma once
#include <emmintrin.h>
#include <smmintrin.h>
#include <stdlib.h>
#include <math.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>
#include "../seq/types.h"
#include "counts_table.h"
#include "basic_map.h"
#include "mph.h"
#if defined(_OPENMP)
#include <omp.h>
#endif
static const int BITS_IN_KMER_HASH = 64; // width of a kmer hash value, in bits
typedef uint64 hash_size_t;              // integer type holding minhash values
// A MinHash fingerprint: one minimum hash value per fingerprint hash
// function (compute_fp() resizes v to the table's FP_len).
struct minhash_fp_t {
    std::vector<hash_size_t> v;
};
// Uniform integer sampling in [0, n) on top of rand(), using rejection of
// the top partial interval to avoid the modulo bias of rand() % n.
struct rand_range_generator_t {
    int rand_in_range(int n) {
        const int limit = RAND_MAX - (RAND_MAX % n); // largest unbiased bound
        int sample = rand();
        while (sample >= limit) {
            sample = rand(); // reject and redraw
        }
        return sample / (limit / n);
    }
};
// multi-stream minhash index
class minhash_table_t : public counts_table_t {
public:
int k;
int n_streams;
int FP_len;
int FP_proj_len;
int n_tables;
// kmer hashing, multiply-shift hash functions (Dietzfelbinger et al)
std::vector<hash_size_t> fp_hash_funcs;
std::vector<std::vector<hash_size_t> > fp_proj_funcs;
std::vector<std::vector<int> > fp_proj_ind;
typedef uint64 proj_hash_t;
std::vector<basic_table_t> tables;
std::vector<mphf_table_t> static_tables;
minhash_table_t(): minhash_table_t(0,0,0,0,0) {}
minhash_table_t(const int kmer_len, const int fp_len, const int n_proj, const int proj_len, const int n_input_streams) {
k = kmer_len;
FP_len = fp_len;
n_tables = n_proj;
FP_proj_len = proj_len;
n_streams = n_input_streams;
init_hash_funcs();
tables.resize(n_tables);
static_tables.resize(n_tables);
}
void init_hash_funcs() {
fp_hash_funcs.resize(FP_len);
for(int i = 0; i < FP_len; i++) {
hash_size_t a = 2ULL*rand() + 1ULL; // odd multipliers
fp_hash_funcs[i] = a;
}
fp_proj_funcs.resize(n_tables);
fp_proj_ind.resize(n_tables);
rand_range_generator_t rgen;
std::vector<int> idx(FP_len);
for(int i = 0; i < n_tables; i++) {
for(int k = 0; k < FP_len; k++) {
idx[k] = k;
}
// pick random indices from the sketch
fp_proj_funcs[i].resize(FP_proj_len);
fp_proj_ind[i].resize(FP_proj_len);
int cnt = 0;
int len = FP_len;
while(cnt < FP_proj_len) {
int j = rgen.rand_in_range(len); // exclude 0
fp_proj_ind[i][cnt] = idx[j];
fp_proj_funcs[i][cnt] = 2ULL * rand() + 1ULL;
idx[j] = idx[len-1];
cnt++;
len--;
}
}
}
bool compute_fp(const std::string seq, minhash_fp_t& fp) {
const int n_kmers = seq.size() - k + 1;
kmer_2bit_t v[n_kmers] __attribute__((aligned(16)));;
int n_valid_kmers = 0;
kmer_parser_t seq_parser;
seq_parser.init(seq, k);
kmer_t kmer;
while(seq_parser.get_next_kmer(kmer)) {
if(!kmer.valid) continue;
v[n_valid_kmers] = kmer.packed_rep;
n_valid_kmers++;
}
if(n_valid_kmers <= k) {
return false;
}
fp.v.resize(FP_len);
for(int h = 0; h < FP_len; h++) {
const hash_size_t s = fp_hash_funcs[h];
hash_size_t curr_min = s*v[0];
for(int i = 1; i < n_valid_kmers; i++) {
hash_size_t p = v[i]*s;
if(p < curr_min) {
curr_min = p;
}
}
fp.v[h] = curr_min;
}
return true;
}
virtual proj_hash_t compute_fp_proj(const minhash_fp_t& fp, const int proj_id) const {
proj_hash_t s = 0;
for(int i = 0; i < FP_proj_len; i++) {
s += fp_proj_funcs[proj_id][i]*fp.v[fp_proj_ind[proj_id][i]];
}
return s;
}
// ---- interface ---- //
virtual void clear() {}
virtual ~minhash_table_t() {}
virtual int get_n_streams() {
return n_streams;
}
virtual void insert(const std::string& seq, const int stream_id) {
minhash_fp_t fp;
if(!compute_fp(seq, fp)) return;
// compute the projections
#pragma omp parallel for
for(int t = 0; t < n_tables; t++) { // for each hash table
proj_hash_t key = compute_fp_proj(fp, t);
tables[t].insert(key, stream_id);
//std::cout << key << " ";
} //std::cout << "\n";
}
// lookup the kmer count in the sketch
virtual counter_t lookup(const minhash_fp_t& fp, const int stream_id) const {
std::vector<counter_t> proj_counts(n_tables);
for(int t = 0; t < n_tables; t++) { // for each hash table
proj_hash_t key = compute_fp_proj(fp, t);
proj_counts[t] = static_tables[t].lookup(key, stream_id);
}
// return median
std::sort(proj_counts.begin(), proj_counts.end());
return proj_counts[proj_counts.size()/2];
}
virtual void done() {
#pragma omp parallel for
for(int t = 0; t < n_tables; t++) { // for each hash table
std::vector<kmer_2bit_t> keys;
std::vector<counter_t> key_counts;
tables[t].get_key_values(keys, key_counts);
tables[t].clear();
static_tables[t].init(keys, key_counts);
}
std::vector<basic_table_t>().swap(tables);
}
// write the table to file
virtual void save_to_file(const std::string& fname, const int n_count_bits) {
std::ofstream file;
file.open(fname.c_str(), std::ios::out | std::ios::binary | std::ios::app);
file.write(reinterpret_cast<char*>(&n_streams), sizeof(n_streams));
file.write(reinterpret_cast<char*>(&k), sizeof(k));
file.write(reinterpret_cast<char*>(&n_tables), sizeof(n_tables));
file.write(reinterpret_cast<char*>(&FP_len), sizeof(FP_len));
file.write(reinterpret_cast<char*>(&FP_proj_len), sizeof(FP_proj_len));
file.close();
for(int t = 0; t < n_tables; t++) { // for each hash table
static_tables[t].save_to_file(fname, n_count_bits);
}
}
// load the table from file
virtual long int load_from_file(const std::string& fname, long int file_offset) {
std::ifstream file;
file.open(fname.c_str(), std::ios::in | std::ios::binary);
file.seekg(file_offset, file.beg);
file.read(reinterpret_cast<char*>(&n_streams), sizeof(n_streams));
file.read(reinterpret_cast<char*>(&k), sizeof(k));
file.read(reinterpret_cast<char*>(&n_tables), sizeof(n_tables));
file.read(reinterpret_cast<char*>(&FP_len), sizeof(FP_len));
file.read(reinterpret_cast<char*>(&FP_proj_len), sizeof(FP_proj_len));
long int s = file.tellg();
file.close();
init_hash_funcs();
tables.resize(n_tables);
static_tables.resize(n_tables);
std::cout << "Minhash config: " << FP_len << " " << n_tables << "\n";
for(int t = 0; t < n_tables; t++) { // for each hash table
s = static_tables[t].load_from_file(fname, s);
}
//init_hash_funcs();
std::cout << "Loaded index \n";
return s;
}
virtual void print_stats() {
for(int t = 0; t < n_tables; t++) {
//tables[t].print_stats();
}
}
virtual counter_t lookup(const kmer_2bit_t& kmer, const int stream_id) const {
return 0;
}
virtual void insert(const kmer_2bit_t& kmer, const int stream_id) {}
};
#endif
|
threshold.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD %
% T H H R R E SS H H O O L D D %
% T HHHHH RRRR EEE SSS HHHHH O O L D D %
% T H H R R E SS H H O O L D D %
% T H H R R EEEEE SSSSS H H OOO LLLLL DDDD %
% %
% %
% MagickCore Image Threshold Methods %
% %
% Software Design %
% John Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/property.h"
#include "magick/blob.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/configure.h"
#include "magick/constitute.h"
#include "magick/decorate.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/effect.h"
#include "magick/fx.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/montage.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/resize.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/shear.h"
#include "magick/signature-private.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/transform.h"
#include "magick/xml-tree.h"
/*
Define declarations.
*/
#define ThresholdsFilename "thresholds.xml"
/*
Typedef declarations.
*/
struct _ThresholdMap
{
  char
    *map_id,       /* name of the map, e.g. "checks" */
    *description;  /* human-readable description from the XML */

  size_t
    width,         /* dimensions of the dither level matrix */
    height;

  ssize_t
    divisor,       /* normalization divisor for the levels (from the XML) */
    *levels;       /* width*height threshold levels */
};
/*
Static declarations.
*/
/*
  Built-in fallback maps used when no thresholds.xml file can be loaded:
  a 1x1 non-dither threshold and a small checkerboard dither.
  NOTE(review): the "checks" entry carries alias "2x1" but declares a
  2x2 levels matrix -- confirm against the external thresholds.xml.
*/
static const char
  *MinimalThresholdMap =
    "<?xml version=\"1.0\"?>"
    "<thresholds>"
    " <threshold map=\"threshold\" alias=\"1x1\">"
    " <description>Threshold 1x1 (non-dither)</description>"
    " <levels width=\"1\" height=\"1\" divisor=\"2\">"
    " 1"
    " </levels>"
    " </threshold>"
    " <threshold map=\"checks\" alias=\"2x1\">"
    " <description>Checkerboard 2x1 (dither)</description>"
    " <levels width=\"2\" height=\"2\" divisor=\"3\">"
    " 1 2"
    " 2 1"
    " </levels>"
    " </threshold>"
    "</thresholds>";
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveThresholdImage() selects an individual threshold for each pixel
% based on the range of intensity values in its local neighborhood. This
% allows for thresholding of an image whose global intensity histogram
% doesn't contain distinctive peaks.
%
% The format of the AdaptiveThresholdImage method is:
%
% Image *AdaptiveThresholdImage(const Image *image,
% const size_t width,const size_t height,
% const ssize_t offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the local neighborhood.
%
% o height: the height of the local neighborhood.
%
% o offset: the mean offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveThresholdImage(const Image *image,
  const size_t width,const size_t height,const ssize_t offset,
  ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view,
    *threshold_view;

  Image
    *threshold_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  MagickRealType
    number_pixels;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* the result is written into a direct-class clone of the input */
  threshold_image=CloneImage(image,0,0,MagickTrue,exception);
  if (threshold_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(threshold_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&threshold_image->exception);
      threshold_image=DestroyImage(threshold_image);
      return((Image *) NULL);
    }
  /*
    Local adaptive threshold.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&zero);
  number_pixels=(MagickRealType) (width*height);
  image_view=AcquireVirtualCacheView(image,exception);
  threshold_view=AcquireAuthenticCacheView(threshold_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,threshold_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    MagickPixelPacket
      channel_bias,
      channel_sum;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p,
      *restrict r;

    register IndexPacket
      *restrict threshold_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    ssize_t
      u,
      v;

    if (status == MagickFalse)
      continue;
    /*
      p covers the whole neighborhood band for this row: width/2 extra
      columns on each side and height rows centered on y.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      height/2L,image->columns+width,height,exception);
    q=GetCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    threshold_indexes=GetCacheViewAuthenticIndexQueue(threshold_view);
    /*
      Prime the sliding window: channel_sum holds the sum of the full first
      window; channel_bias caches the sum of its right-most column.
    */
    channel_bias=zero;
    channel_sum=zero;
    r=p;
    for (v=0; v < (ssize_t) height; v++)
    {
      for (u=0; u < (ssize_t) width; u++)
      {
        if (u == (ssize_t) (width-1))
          {
            channel_bias.red+=r[u].red;
            channel_bias.green+=r[u].green;
            channel_bias.blue+=r[u].blue;
            channel_bias.opacity+=r[u].opacity;
            if (image->colorspace == CMYKColorspace)
              channel_bias.index=(MagickRealType)
                GetPixelIndex(indexes+(r-p)+u);
          }
        channel_sum.red+=r[u].red;
        channel_sum.green+=r[u].green;
        channel_sum.blue+=r[u].blue;
        channel_sum.opacity+=r[u].opacity;
        if (image->colorspace == CMYKColorspace)
          channel_sum.index=(MagickRealType) GetPixelIndex(indexes+(r-p)+u);
      }
      r+=image->columns+width;
    }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickPixelPacket
        mean;

      mean=zero;
      r=p;
      /*
        Slide the window one pixel right: drop the column cached in
        channel_bias, then re-cache the new left-most column and add the
        new right-most column.
      */
      channel_sum.red-=channel_bias.red;
      channel_sum.green-=channel_bias.green;
      channel_sum.blue-=channel_bias.blue;
      channel_sum.opacity-=channel_bias.opacity;
      channel_sum.index-=channel_bias.index;
      channel_bias=zero;
      for (v=0; v < (ssize_t) height; v++)
      {
        channel_bias.red+=r[0].red;
        channel_bias.green+=r[0].green;
        channel_bias.blue+=r[0].blue;
        channel_bias.opacity+=r[0].opacity;
        if (image->colorspace == CMYKColorspace)
          channel_bias.index=(MagickRealType) GetPixelIndex(indexes+x+(r-p)+0);
        channel_sum.red+=r[width-1].red;
        channel_sum.green+=r[width-1].green;
        channel_sum.blue+=r[width-1].blue;
        channel_sum.opacity+=r[width-1].opacity;
        if (image->colorspace == CMYKColorspace)
          channel_sum.index=(MagickRealType) GetPixelIndex(indexes+x+(r-p)+
            width-1);
        r+=image->columns+width;
      }
      /* pixel goes white only when it exceeds the local mean plus offset */
      mean.red=(MagickRealType) (channel_sum.red/number_pixels+offset);
      mean.green=(MagickRealType) (channel_sum.green/number_pixels+offset);
      mean.blue=(MagickRealType) (channel_sum.blue/number_pixels+offset);
      mean.opacity=(MagickRealType) (channel_sum.opacity/number_pixels+offset);
      if (image->colorspace == CMYKColorspace)
        mean.index=(MagickRealType) (channel_sum.index/number_pixels+offset);
      SetPixelRed(q,((MagickRealType) GetPixelRed(q) <= mean.red) ?
        0 : QuantumRange);
      SetPixelGreen(q,((MagickRealType) GetPixelGreen(q) <= mean.green) ?
        0 : QuantumRange);
      SetPixelBlue(q,((MagickRealType) GetPixelBlue(q) <= mean.blue) ?
        0 : QuantumRange);
      SetPixelOpacity(q,((MagickRealType) GetPixelOpacity(q) <= mean.opacity) ?
        0 : QuantumRange);
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(threshold_indexes+x,(((MagickRealType) GetPixelIndex(
          threshold_indexes+x) <= mean.index) ? 0 : QuantumRange));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(threshold_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AdaptiveThresholdImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  threshold_view=DestroyCacheView(threshold_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    threshold_image=DestroyImage(threshold_image);
  return(threshold_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B i l e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BilevelImage() changes the value of individual pixels based on the
% intensity of each pixel channel. The result is a high-contrast image.
%
% More precisely each channel value of the image is 'thresholded' so that if
% it is equal to or less than the given value it is set to zero, while any
% value greater than that give is set to it maximum or QuantumRange.
%
% This function is what is used to implement the "-threshold" operator for
% the command line API.
%
% If the default channel setting is given the image is thresholded using just
% the gray 'intensity' of the image, rather than the individual channels.
%
% The format of the BilevelImageChannel method is:
%
% MagickBooleanType BilevelImage(Image *image,const double threshold)
% MagickBooleanType BilevelImageChannel(Image *image,
% const ChannelType channel,const double threshold)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o threshold: define the threshold values.
%
% Aside: You can get the same results as operator using LevelImageChannels()
% with the 'threshold' value for both the black_point and the white_point.
%
*/
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold)
{
  /* Convenience wrapper: threshold all default channels. */
  return(BilevelImageChannel(image,DefaultChannels,threshold));
}
MagickExport MagickBooleanType BilevelImageChannel(Image *image,
  const ChannelType channel,const double threshold)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* gray images are promoted to sRGB before thresholding */
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    Bilevel threshold image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if ((channel & SyncChannels) != 0)
      {
        /*
          Composite channel: threshold on the pixel intensity and write a
          pure black/white result into all three color channels.
        */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelIntensity(image,q) <= threshold ? 0 :
            QuantumRange);
          SetPixelGreen(q,GetPixelRed(q));
          SetPixelBlue(q,GetPixelRed(q));
          q++;
        }
      }
    else
      /* otherwise threshold each requested channel independently */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        if ((channel & RedChannel) != 0)
          SetPixelRed(q,(MagickRealType) GetPixelRed(q) <= threshold ? 0 :
            QuantumRange);
        if ((channel & GreenChannel) != 0)
          SetPixelGreen(q,(MagickRealType) GetPixelGreen(q) <= threshold ? 0 :
            QuantumRange);
        if ((channel & BlueChannel) != 0)
          SetPixelBlue(q,(MagickRealType) GetPixelBlue(q) <= threshold ? 0 :
            QuantumRange);
        if ((channel & OpacityChannel) != 0)
          {
            /* matte images use opacity semantics, not raw quantum values */
            if (image->matte == MagickFalse)
              SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <=
                threshold ? 0 : QuantumRange);
            else
              SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <=
                threshold ? OpaqueOpacity : TransparentOpacity);
          }
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          SetPixelIndex(indexes+x,(MagickRealType) GetPixelIndex(indexes+x) <=
            threshold ? 0 : QuantumRange);
        q++;
      }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BilevelImageChannel)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l a c k T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlackThresholdImage() is like ThresholdImage() but forces all pixels below
% the threshold into black while leaving all pixels at or above the threshold
% unchanged.
%
% The format of the BlackThresholdImage method is:
%
% MagickBooleanType BlackThresholdImage(Image *image,const char *threshold)
% MagickBooleanType BlackThresholdImageChannel(Image *image,
% const ChannelType channel,const char *threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel or channels to be thresholded.
%
% o threshold: Define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *threshold)
{
  /*
    Convenience wrapper: threshold the default channels, reporting any
    problem through the image's own exception structure.
  */
  return(BlackThresholdImageChannel(image,DefaultChannels,threshold,
    &image->exception));
}
MagickExport MagickBooleanType BlackThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Parse the threshold geometry; any per-channel component not supplied
    defaults to the red (first) value.
  */
  GetMagickPixelPacket(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    threshold.green=threshold.red;
  threshold.blue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    threshold.blue=threshold.red;
  threshold.opacity=geometry_info.psi;
  if ((flags & PsiValue) == 0)
    threshold.opacity=threshold.red;
  threshold.index=geometry_info.chi;
  if ((flags & ChiValue) == 0)
    threshold.index=threshold.red;
  if ((flags & PercentValue) != 0)
    {
      /* percentages are relative to the quantum range */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.opacity*=(MagickRealType) (QuantumRange/100.0);
      threshold.index*=(MagickRealType) (QuantumRange/100.0);
    }
  /* a non-gray threshold on a gray image forces a colorspace promotion */
  if ((IsMagickGray(&threshold) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    Black threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* channel values strictly below the threshold are forced to black */
      if (((channel & RedChannel) != 0) &&
          ((MagickRealType) GetPixelRed(q) < threshold.red))
        SetPixelRed(q,0);
      if (((channel & GreenChannel) != 0) &&
          ((MagickRealType) GetPixelGreen(q) < threshold.green))
        SetPixelGreen(q,0);
      if (((channel & BlueChannel) != 0) &&
          ((MagickRealType) GetPixelBlue(q) < threshold.blue))
        SetPixelBlue(q,0);
      if (((channel & OpacityChannel) != 0) &&
          ((MagickRealType) GetPixelOpacity(q) < threshold.opacity))
        SetPixelOpacity(q,0);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          ((MagickRealType) GetPixelIndex(indexes+x) < threshold.index))
        SetPixelIndex(indexes+x,0);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BlackThresholdImageChannel)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l a m p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClampImage() set each pixel whose value is below zero to zero and any the
% pixel whose value is above the quantum range to the quantum range (e.g.
% 65535) otherwise the pixel value remains unchanged.
%
% The format of the ClampImageChannel method is:
%
% MagickBooleanType ClampImage(Image *image)
% MagickBooleanType ClampImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
*/
/* Clamp a real-valued pixel component into the representable Quantum
   range. */
static inline Quantum ClampPixel(const MagickRealType value)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /* Integer quantum build: only a cast is done here.
     NOTE(review): assumes the value already lies in quantum range (callers
     pass stored pixel values); a negative input would wrap -- confirm. */
  return((Quantum) value);
#else
  /* HDRI build: Quantum is floating point, so clamp to [0, QuantumRange]. */
  if (value < 0.0f)
    return(0.0f);
  if (value >= (MagickRealType) QuantumRange)
    return((Quantum) QuantumRange);
  return(value);
#endif
}
MagickExport MagickBooleanType ClampImage(Image *image)
{
  /* Clamp every default channel of the image. */
  return(ClampImageChannel(image,DefaultChannels));
}
MagickExport MagickBooleanType ClampImageChannel(Image *image,
  const ChannelType channel)
{
#define ClampImageTag "Clamp/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        Colormapped image: clamping the colormap entries is sufficient;
        SyncImage pushes the updated colormap back into the pixels.
      */
      register ssize_t
        i;

      register PixelPacket
        *restrict q;

      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        SetPixelRed(q,ClampPixel(GetPixelRed(q)));
        SetPixelGreen(q,ClampPixel(GetPixelGreen(q)));
        SetPixelBlue(q,ClampPixel(GetPixelBlue(q)));
        SetPixelOpacity(q,ClampPixel(GetPixelOpacity(q)));
        q++;
      }
      return(SyncImage(image));
    }
  /*
    Clamp image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampPixel(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampPixel(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampPixel(GetPixelBlue(q)));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampPixel(GetPixelOpacity(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampPixel(GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ClampImageChannel)
#endif
        proceed=SetImageProgress(image,ClampImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyThresholdMap() de-allocate the given ThresholdMap
%
% The format of the ListThresholdMaps method is:
%
% ThresholdMap *DestroyThresholdMap(Threshold *map)
%
% A description of each parameter follows.
%
% o map: Pointer to the Threshold map to destroy
%
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
  /*
    Release the strings and the level matrix owned by the map, then the
    map structure itself; always returns NULL so the caller can clear its
    pointer in one statement.
  */
  assert(map != (ThresholdMap *) NULL);
  if (map->map_id != (char *) NULL)
    map->map_id=DestroyString(map->map_id);
  if (map->description != (char *) NULL)
    map->description=DestroyString(map->description);
  if (map->levels != (ssize_t *) NULL)
    map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
  return((ThresholdMap *) RelinquishMagickMemory(map));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMapFile() look for a given threshold map name or alias in the
% given XML file data, and return the allocated the map when found.
%
% The format of the ListThresholdMaps method is:
%
% ThresholdMap *GetThresholdMap(const char *xml,const char *filename,
% const char *map_id,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o map_id: ID of the map to look for in XML list.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetThresholdMapFile() searches the given XML data for a <threshold>
  element whose "map" or "alias" attribute matches map_id and returns a
  newly allocated ThresholdMap, or NULL when not found or on a parse
  error (errors are reported through 'exception').  The caller owns the
  returned map and must free it with DestroyThresholdMap().
*/
MagickExport ThresholdMap *GetThresholdMapFile(const char *xml,
  const char *filename,const char *map_id,ExceptionInfo *exception)
{
  const char
    *attr,
    *content;

  ThresholdMap
    *map;

  XMLTreeInfo
    *description,
    *levels,
    *threshold,
    *thresholds;

  map=(ThresholdMap *) NULL;
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(map);
  /*
    Locate the <threshold> element matching map_id by "map" or "alias".
  */
  for (threshold=GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold=GetNextXMLTreeTag(threshold))
  {
    attr=GetXMLTreeAttribute(threshold,"map");
    if ((attr != (char *) NULL) && (LocaleCompare(map_id,attr) == 0))
      break;
    attr=GetXMLTreeAttribute(threshold,"alias");
    if ((attr != (char *) NULL) && (LocaleCompare(map_id,attr) == 0))
      break;
  }
  if (threshold == (XMLTreeInfo *) NULL)
    {
      /* bug fix: the XML tree was previously leaked on this path */
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  description=GetXMLTreeChild(threshold,"description");
  if (description == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<description>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  levels=GetXMLTreeChild(threshold,"levels");
  if (levels == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /*
    The map has been found -- allocate a ThresholdMap to return.
  */
  map=(ThresholdMap *) AcquireMagickMemory(sizeof(ThresholdMap));
  if (map == (ThresholdMap *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  map->map_id=(char *) NULL;
  map->description=(char *) NULL;
  map->levels=(ssize_t *) NULL;
  /*
    Assign basic attributes.
  */
  attr=GetXMLTreeAttribute(threshold,"map");
  if (attr != (char *) NULL)
    map->map_id=ConstantString(attr);
  content=GetXMLTreeContent(description);
  if (content != (char *) NULL)
    map->description=ConstantString(content);
  attr=GetXMLTreeAttribute(levels,"width");
  if (attr == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels width>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->width=StringToUnsignedLong(attr);
  if (map->width == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels width>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attr=GetXMLTreeAttribute(levels,"height");
  if (attr == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels height>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->height=StringToUnsignedLong(attr);
  if (map->height == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels height>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attr=GetXMLTreeAttribute(levels,"divisor");
  if (attr == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels divisor>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->divisor=(ssize_t) StringToLong(attr);
  if (map->divisor < 2)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels divisor>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  /*
    Allocate the threshold levels array.
  */
  content=GetXMLTreeContent(levels);
  if (content == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingContent", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height*
    sizeof(*map->levels));
  if (map->levels == (ssize_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  {
    /*
      Parse the levels content into the integer array; each entry must be
      within [0,divisor].
    */
    char
      *p;

    ssize_t
      i;

    for (i=0; i < (ssize_t) (map->width*map->height); i++)
    {
      map->levels[i]=(ssize_t) strtol(content,&p,10);
      if (p == content)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "XmlInvalidContent", "<level> too few values, map \"%s\"", map_id);
          thresholds=DestroyXMLTree(thresholds);
          map=DestroyThresholdMap(map);
          return(map);
        }
      if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
        {
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
            (double) map->levels[i],map_id);
          thresholds=DestroyXMLTree(thresholds);
          map=DestroyThresholdMap(map);
          return(map);
        }
      content=p;
    }
    /* a trailing parseable number means too many values were supplied */
    (void) strtol(content,&p,10);
    if (p != content)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> too many values, map \"%s\"", map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
  }
  thresholds=DestroyXMLTree(thresholds);
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMap() loads and searches one or more threshold map files for a
% map matching the given name or alias.
%
% The format of the GetThresholdMap method is:
%
% ThresholdMap *GetThresholdMap(const char *map_id,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o map_id: ID of the map to look for.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetThresholdMap() loads and searches the built-in map and then each
  configured threshold-map XML file for a map matching map_id.  Returns
  a newly allocated ThresholdMap (caller frees with DestroyThresholdMap)
  or NULL when no match is found.
*/
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  ThresholdMap
    *map;

  /*
    Check the built-in minimal map first.
  */
  map=GetThresholdMapFile(MinimalThresholdMap,"built-in",map_id,exception);
  if (map != (ThresholdMap *) NULL)
    return(map);
  options=GetConfigureOptions(ThresholdsFilename,exception);
  option=(const StringInfo *) GetNextValueInLinkedList(options);
  while ((option != (const StringInfo *) NULL) &&
         (map == (ThresholdMap *) NULL))
  {
    map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),map_id,exception);
    option=(const StringInfo *) GetNextValueInLinkedList(options);
  }
  /* bug fix: always release the options list (it was leaked when a map
     was found mid-loop) */
  options=DestroyConfigureOptions(options);
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ L i s t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMapFile() lists the threshold maps and their descriptions
% in the given XML file data.
%
% The format of the ListThresholdMapFile method is:
%
% MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
% const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o file: A pointer to the output FILE.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  List every <threshold> entry in the given XML data to 'file' as a
  three-column table (map, alias, description).  Returns MagickTrue on
  success, MagickFalse on a parse or schema error.
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  const char
    *alias,
    *content,
    *map;

  XMLTreeInfo
    *description,
    *threshold,
    *thresholds;

  assert(xml != (char *) NULL);
  assert(file != (FILE *) NULL);
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(MagickFalse);
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  threshold=GetXMLTreeChild(thresholds,"threshold");
  while (threshold != (XMLTreeInfo *) NULL)
  {
    map=GetXMLTreeAttribute(threshold,"map");
    if (map == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingAttribute", "<map>");
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    alias=GetXMLTreeAttribute(threshold,"alias");  /* optional attribute */
    description=GetXMLTreeChild(threshold,"description");
    if (description == (XMLTreeInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingElement", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    content=GetXMLTreeContent(description);
    if (content == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingContent", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "",
      content);
    threshold=GetNextXMLTreeTag(threshold);
  }
  thresholds=DestroyXMLTree(thresholds);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i s t T h r e s h o l d M a p s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMaps() lists the threshold maps and their descriptions
% as defined by "threshold.xml" to a file.
%
% The format of the ListThresholdMaps method is:
%
% MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o file: A pointer to the output FILE.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  List all threshold maps from every configured threshold XML file to
  'file' (stdout when NULL).  Returns MagickTrue if any file listed
  successfully.
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  if (file == (FILE *) NULL)
    file=stdout;
  status=MagickFalse;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n Threshold Maps for Ordered Dither Operations\n");
  for (option=(const StringInfo *) GetNextValueInLinkedList(options);
       option != (const StringInfo *) NULL;
       option=(const StringInfo *) GetNextValueInLinkedList(options))
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
    status|=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),exception);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O r d e r e d D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OrderedDitherImage() uses the ordered dithering technique of reducing color
% images to monochrome using positional information to retain as much
% information as possible.
%
% WARNING: This function is deprecated, and is now just a call to
% the more powerful OrderedPosterizeImage() function.
%
% The format of the OrderedDitherImage method is:
%
% MagickBooleanType OrderedDitherImage(Image *image)
% MagickBooleanType OrderedDitherImageChannel(Image *image,
% const ChannelType channel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel or channels to be thresholded.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Deprecated wrapper: dither all default channels via
  OrderedDitherImageChannel().
*/
MagickExport MagickBooleanType OrderedDitherImage(Image *image)
{
  return(OrderedDitherImageChannel(image,DefaultChannels,&image->exception));
}
/*
  Deprecated wrapper: forward to the more general
  OrderedPosterizeImageChannel() with the classic 8x8 ordered-dither map.
*/
MagickExport MagickBooleanType OrderedDitherImageChannel(Image *image,
  const ChannelType channel,ExceptionInfo *exception)
{
  return(OrderedPosterizeImageChannel(image,channel,"o8x8",exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O r d e r e d P o s t e r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OrderedPosterizeImage() performs an ordered dither based on a number
% of pre-defined dithering threshold maps, but over multiple intensity
% levels, which can be different for different channels, according to the
% input argument.
%
% The format of the OrderedPosterizeImage method is:
%
% MagickBooleanType OrderedPosterizeImage(Image *image,
% const char *threshold_map,ExceptionInfo *exception)
% MagickBooleanType OrderedPosterizeImageChannel(Image *image,
% const ChannelType channel,const char *threshold_map,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel or channels to be thresholded.
%
% o threshold_map: A string containing the name of the threshold dither
% map to use, followed by zero or more numbers representing the number
% of color levels to dither between.
%
% Any level number less than 2 will be equivalent to 2, and means only
% binary dithering will be applied to each color channel.
%
% No numbers also means a 2 level (bitmap) dither will be applied to all
% channels, while a single number is the number of levels applied to each
% channel in sequence. More numbers will be applied in turn to each of
% the color channels.
%
% For example: "o3x3,6" will generate a 6 level posterization of the
% image with an ordered 3x3 diffused pixel dither being applied between
% each level. While checker,8,8,4 will produce a 332 colormapped image
% with only a single checkerboard hash pattern (50% grey) between each
% color level, to basically double the number of color levels with
% a bare minimum of dithering.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Convenience wrapper: posterize all default channels with the given
  threshold-map specification.
*/
MagickExport MagickBooleanType OrderedPosterizeImage(Image *image,
  const char *threshold_map,ExceptionInfo *exception)
{
  return(OrderedPosterizeImageChannel(image,DefaultChannels,threshold_map,
    exception));
}
/*
  OrderedPosterizeImageChannel() applies an ordered dither over multiple
  intensity levels.  'threshold_map' is the map name optionally followed
  by comma-separated per-channel level counts (default 2).  Returns
  MagickFalse when the map is unknown, the image cannot be made
  DirectClass, or a pixel row fails to read/sync.
*/
MagickExport MagickBooleanType OrderedPosterizeImageChannel(Image *image,
  const ChannelType channel,const char *threshold_map,ExceptionInfo *exception)
{
#define DitherImageTag  "Dither/Image"

  CacheView
    *image_view;

  LongPixelPacket
    levels;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  ThresholdMap
    *map;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if (threshold_map == (const char *) NULL)
    return(MagickTrue);
  {
    /*
      Extract the leading map-name token (up to whitespace or comma,
      bounded by MaxTextExtent) and look the map up.
    */
    char
      token[MaxTextExtent];

    register const char
      *p;

    p=(char *) threshold_map;
    while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) &&
           (*p != '\0'))
      p++;
    threshold_map=p;
    while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) &&
           (*p != '\0'))
    {
      if ((p-threshold_map) >= (MaxTextExtent-1))
        break;
      token[p-threshold_map]=(*p);
      p++;
    }
    token[p-threshold_map]='\0';
    map=GetThresholdMap(token,exception);
    if (map == (ThresholdMap *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "InvalidArgument","%s : '%s'","ordered-dither",threshold_map);
        return(MagickFalse);
      }
  }
  /*
    Set channel levels from extra comma separated arguments.  Default to 2,
    the single value given, or individual channel values.
  */
  {
    /* parse directly as a comma separated list of integers */
    char
      *p;

    p=strchr((char *) threshold_map,',');
    if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0))
      levels.index=(unsigned int) strtoul(p,&p,10);
    else
      levels.index=2;
    levels.red=((channel & RedChannel) != 0) ? levels.index : 0;
    levels.green=((channel & GreenChannel) != 0) ? levels.index : 0;
    levels.blue=((channel & BlueChannel) != 0) ? levels.index : 0;
    levels.opacity=((channel & OpacityChannel) != 0) ? levels.index : 0;
    levels.index=(((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace)) ? levels.index : 0;
    /* if more than a single number, each channel has a separate value */
    if ((p != (char *) NULL) && (*p == ','))
      {
        p=strchr((char *) threshold_map,',');
        p++;
        if ((channel & RedChannel) != 0)
          levels.red=(unsigned int) strtoul(p,&p,10), (void) (*p == ',' && p++);
        if ((channel & GreenChannel) != 0)
          levels.green=(unsigned int) strtoul(p,&p,10), (void) (*p == ',' && p++);
        if ((channel & BlueChannel) != 0)
          levels.blue=(unsigned int) strtoul(p,&p,10), (void) (*p == ',' && p++);
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          levels.index=(unsigned int) strtoul(p,&p,10), (void) (*p == ',' && p++);
        if ((channel & OpacityChannel) != 0)
          levels.opacity=(unsigned int) strtoul(p,&p,10), (void) (*p == ',' && p++);
      }
  }
  {
    /* Do the posterized ordered dithering of the image */
    ssize_t
      d;

    /* d = number of pseudo-level divisions added between color levels */
    d=map->divisor-1;
    /* reduce levels to levels - 1 */
    levels.red=levels.red ? levels.red-1 : 0;
    levels.green=levels.green ? levels.green-1 : 0;
    levels.blue=levels.blue ? levels.blue-1 : 0;
    levels.opacity=levels.opacity ? levels.opacity-1 : 0;
    levels.index=levels.index ? levels.index-1 : 0;
    if (SetImageStorageClass(image,DirectClass) == MagickFalse)
      {
        InheritException(exception,&image->exception);
        /* bug fix: the threshold map was previously leaked on this path */
        map=DestroyThresholdMap(map);
        return(MagickFalse);
      }
    status=MagickTrue;
    progress=0;
    image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register IndexPacket
        *restrict indexes;

      register ssize_t
        x;

      register PixelPacket
        *restrict q;

      if (status == MagickFalse)
        continue;
      q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (q == (PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      indexes=GetCacheViewAuthenticIndexQueue(image_view);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        register ssize_t
          threshold,
          t,
          l;

        /*
          Figure out the dither threshold for this pixel.
          This must be a integer from 1 to map->divisor-1.
        */
        threshold=map->levels[(x % map->width)+map->width*(y % map->height)];
        /* Dither each channel in the image as appropriate
          Notes on the integer Math...
              total number of divisions = (levels-1)*(divisor-1)+1)
              t1 = this colors psuedo_level =
                      q->red * total_divisions / (QuantumRange+1)
              l = posterization level       0..levels
              t = dither threshold level    0..divisor-1  NB: 0 only on last
              Each color_level is of size   QuantumRange / (levels-1)
              NB: All input levels and divisor are already had 1 subtracted
              Opacity is inverted so 'off' represents transparent.
        */
        if (levels.red) {
          t=(ssize_t) (QuantumScale*GetPixelRed(q)*(levels.red*d+1));
          l=t/d;  t=t-l*d;
          SetPixelRed(q,ClampToQuantum((MagickRealType)
            ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.red)));
        }
        if (levels.green) {
          t=(ssize_t) (QuantumScale*GetPixelGreen(q)*
            (levels.green*d+1));
          l=t/d;  t=t-l*d;
          SetPixelGreen(q,ClampToQuantum((MagickRealType)
            ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.green)));
        }
        if (levels.blue) {
          t=(ssize_t) (QuantumScale*GetPixelBlue(q)*
            (levels.blue*d+1));
          l=t/d;  t=t-l*d;
          SetPixelBlue(q,ClampToQuantum((MagickRealType)
            ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.blue)));
        }
        if (levels.opacity) {
          t=(ssize_t) ((1.0-QuantumScale*GetPixelOpacity(q))*
            (levels.opacity*d+1));
          l=t/d;  t=t-l*d;
          SetPixelOpacity(q,ClampToQuantum((MagickRealType)
            ((1.0-l-(t >= threshold))*(MagickRealType) QuantumRange/
            levels.opacity)));
        }
        if (levels.index) {
          t=(ssize_t) (QuantumScale*GetPixelIndex(indexes+x)*
            (levels.index*d+1));
          l=t/d;  t=t-l*d;
          SetPixelIndex(indexes+x,ClampToQuantum((MagickRealType) ((l+
            (t >= threshold))*(MagickRealType) QuantumRange/levels.index)));
        }
        q++;
      }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_OrderedPosterizeImageChannel)
#endif
          proceed=SetImageProgress(image,DitherImageTag,progress++,image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
    image_view=DestroyCacheView(image_view);
  }
  map=DestroyThresholdMap(map);
  /* bug fix: report accumulated status (was an unconditional MagickTrue) */
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P e r c e p t i b l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PerceptibleImage() set each pixel whose value is less than |epsilon| to
% epsilon or -epsilon (whichever is closer) otherwise the pixel value remains
% unchanged.
%
% The format of the PerceptibleImageChannel method is:
%
% MagickBooleanType PerceptibleImage(Image *image,const double epsilon)
% MagickBooleanType PerceptibleImageChannel(Image *image,
% const ChannelType channel,const double epsilon)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
*/
/*
  Clamp a near-zero quantum to the nearest of +epsilon/-epsilon; values
  whose magnitude is already >= epsilon pass through unchanged.
  NOTE(review): for unsigned Quantum builds the negative branch appears
  unreachable -- confirm against the configured quantum depth.
*/
static inline Quantum PerceptibleThreshold(const Quantum quantum,
  const double epsilon)
{
  if ((double) quantum < 0.0)
    {
      if ((-1.0*quantum) >= epsilon)
        return(quantum);
      return((Quantum) (-1.0*epsilon));
    }
  if ((1.0*quantum) >= epsilon)
    return(quantum);
  return((Quantum) (1.0*epsilon));
}
/*
  Convenience wrapper: apply the perceptible-epsilon clamp to all
  default channels.
*/
MagickExport MagickBooleanType PerceptibleImage(Image *image,
  const double epsilon)
{
  return(PerceptibleImageChannel(image,DefaultChannels,epsilon));
}
MagickExport MagickBooleanType PerceptibleImageChannel(Image *image,
  const ChannelType channel,const double epsilon)
{
#define PerceptibleImageTag  "Perceptible/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Clamp each selected channel of every pixel through
    PerceptibleThreshold(): values of magnitude below |epsilon| become
    +/-epsilon, others are left unchanged.  Returns MagickFalse if any
    pixel row fails to read or sync.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelPacket
        *restrict q;

      /*
        PseudoClass: adjust the colormap entries only (all RGBO
        components, regardless of 'channel'), then sync pixels.
      */
      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        SetPixelRed(q,PerceptibleThreshold(GetPixelRed(q),epsilon));
        SetPixelGreen(q,PerceptibleThreshold(GetPixelGreen(q),epsilon));
        SetPixelBlue(q,PerceptibleThreshold(GetPixelBlue(q),epsilon));
        SetPixelOpacity(q,PerceptibleThreshold(GetPixelOpacity(q),epsilon));
        q++;
      }
      return(SyncImage(image));
    }
  /*
    Perceptible image: process authentic pixels row by row, optionally in
    parallel.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,PerceptibleThreshold(GetPixelRed(q),epsilon));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,PerceptibleThreshold(GetPixelGreen(q),epsilon));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,PerceptibleThreshold(GetPixelBlue(q),epsilon));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,PerceptibleThreshold(GetPixelOpacity(q),epsilon));
      /* the index (black) channel is only meaningful for CMYK images */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,PerceptibleThreshold(GetPixelIndex(indexes+x),
          epsilon));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* 'progress' is shared across threads; serialize the update */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_PerceptibleImageChannel)
#endif
        proceed=SetImageProgress(image,PerceptibleImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a n d o m T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RandomThresholdImage() changes the value of individual pixels based on the
% intensity of each pixel compared to a random threshold. The result is a
% low-contrast, two color image.
%
% The format of the RandomThresholdImage method is:
%
% MagickBooleanType RandomThresholdImage(Image *image,
% const char *thresholds,ExceptionInfo *exception)
% MagickBooleanType RandomThresholdImageChannel(Image *image,
% const ChannelType channel,const char *thresholds,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel or channels to be thresholded.
%
% o thresholds: a geometry string containing low,high thresholds. If the
% string contains 2x2, 3x3, or 4x4, an ordered dither of order 2, 3, or 4
% is performed instead.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Convenience wrapper: random-threshold all default channels.
*/
MagickExport MagickBooleanType RandomThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
  return(RandomThresholdImageChannel(image,DefaultChannels,thresholds,
    exception));
}
/*
  RandomThresholdImageChannel() thresholds each selected channel of each
  pixel against a per-pixel random value clamped to [min,max] parsed
  from 'thresholds' ("low[,high][%]").  With CompositeChannels a 2-color
  colormapped result is produced from pixel intensity; small integer
  thresholds fall back to the legacy ordered-dither behavior.
*/
MagickExport MagickBooleanType RandomThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickRealType
    min_threshold,
    max_threshold;

  RandomInfo
    **restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  GetMagickPixelPacket(image,&threshold);
  /*
    Parse the threshold range; a missing sigma means max == min.
    (cleanup: removed dead pre-initialization of min/max_threshold,
    which was immediately overwritten here.)
  */
  flags=ParseGeometry(thresholds,&geometry_info);
  min_threshold=geometry_info.rho;
  max_threshold=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    max_threshold=min_threshold;
  if (strchr(thresholds,'%') != (char *) NULL)
    {
      max_threshold*=(MagickRealType) (0.01*QuantumRange);
      min_threshold*=(MagickRealType) (0.01*QuantumRange);
    }
  else
    if (((max_threshold == min_threshold) || (max_threshold == 1)) &&
        (min_threshold <= 8))
      {
        /*
          Backward Compatibility -- ordered-dither -- IM v 6.2.9-6.
        */
        status=OrderedPosterizeImageChannel(image,channel,thresholds,exception);
        return(status);
      }
  /*
    Random threshold image.
  */
  status=MagickTrue;
  progress=0;
  if (channel == CompositeChannels)
    {
      /*
        All channels: build a 2-entry colormap and threshold on pixel
        intensity.
      */
      if (AcquireImageColormap(image,2) == MagickFalse)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      random_info=AcquireRandomInfoThreadSet();
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
#endif
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        MagickBooleanType
          sync;

        register IndexPacket
          *restrict indexes;

        register ssize_t
          x;

        register PixelPacket
          *restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          IndexPacket
            index;

          MagickRealType
            intensity;

          /* pick a random threshold within [min,max] for this pixel */
          intensity=GetPixelIntensity(image,q);
          if (intensity < min_threshold)
            threshold.index=min_threshold;
          else if (intensity > max_threshold)
            threshold.index=max_threshold;
          else
            threshold.index=(MagickRealType)(QuantumRange*
              GetPseudoRandomValue(random_info[id]));
          index=(IndexPacket) (intensity <= threshold.index ? 0 : 1);
          SetPixelIndex(indexes+x,index);
          SetPixelRGBO(q,image->colormap+(ssize_t) index);
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_RandomThresholdImageChannel)
#endif
            proceed=SetImageProgress(image,ThresholdImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      random_info=DestroyRandomInfoThreadSet(random_info);
      return(status);
    }
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
  random_info=AcquireRandomInfoThreadSet();
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
#endif
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Choose a per-channel random threshold clamped to [min,max], then
        binarize each selected channel against it.
      */
      if ((channel & RedChannel) != 0)
        {
          if ((MagickRealType) GetPixelRed(q) < min_threshold)
            threshold.red=min_threshold;
          else
            if ((MagickRealType) GetPixelRed(q) > max_threshold)
              threshold.red=max_threshold;
            else
              threshold.red=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & GreenChannel) != 0)
        {
          if ((MagickRealType) GetPixelGreen(q) < min_threshold)
            threshold.green=min_threshold;
          else
            if ((MagickRealType) GetPixelGreen(q) > max_threshold)
              threshold.green=max_threshold;
            else
              threshold.green=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & BlueChannel) != 0)
        {
          if ((MagickRealType) GetPixelBlue(q) < min_threshold)
            threshold.blue=min_threshold;
          else
            if ((MagickRealType) GetPixelBlue(q) > max_threshold)
              threshold.blue=max_threshold;
            else
              threshold.blue=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & OpacityChannel) != 0)
        {
          if ((MagickRealType) GetPixelOpacity(q) < min_threshold)
            threshold.opacity=min_threshold;
          else
            if ((MagickRealType) GetPixelOpacity(q) > max_threshold)
              threshold.opacity=max_threshold;
            else
              threshold.opacity=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          if ((MagickRealType) GetPixelIndex(indexes+x) < min_threshold)
            threshold.index=min_threshold;
          else
            if ((MagickRealType) GetPixelIndex(indexes+x) > max_threshold)
              threshold.index=max_threshold;
            else
              threshold.index=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,(MagickRealType) GetPixelRed(q) <= threshold.red ?
          0 : QuantumRange);
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,(MagickRealType) GetPixelGreen(q) <= threshold.green ?
          0 : QuantumRange);
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,(MagickRealType) GetPixelBlue(q) <= threshold.blue ?
          0 : QuantumRange);
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <=
          threshold.opacity ? 0 : QuantumRange);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,(MagickRealType) GetPixelIndex(indexes+x) <=
          threshold.index ? 0 : QuantumRange);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_RandomThresholdImageChannel)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W h i t e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WhiteThresholdImage() is like ThresholdImage() but forces all pixels above
% the threshold into white while leaving all pixels at or below the threshold
% unchanged.
%
% The format of the WhiteThresholdImage method is:
%
% MagickBooleanType WhiteThresholdImage(Image *image,const char *threshold)
% MagickBooleanType WhiteThresholdImageChannel(Image *image,
% const ChannelType channel,const char *threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel or channels to be thresholded.
%
% o threshold: Define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
  const char *threshold)
{
  /*
    Convenience wrapper: threshold every default channel and report any
    problem through the image's own exception structure.
  */
  return(WhiteThresholdImageChannel(image,DefaultChannels,threshold,
    &image->exception));
}
MagickExport MagickBooleanType WhiteThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* A NULL threshold string is treated as "nothing to do", not an error. */
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Parse the geometry string into per-channel thresholds; any channel value
    the string omits falls back to the first (red) value.
  */
  flags=ParseGeometry(thresholds,&geometry_info);
  GetMagickPixelPacket(image,&threshold);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    threshold.green=threshold.red;
  threshold.blue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    threshold.blue=threshold.red;
  threshold.opacity=geometry_info.psi;
  if ((flags & PsiValue) == 0)
    threshold.opacity=threshold.red;
  threshold.index=geometry_info.chi;
  if ((flags & ChiValue) == 0)
    threshold.index=threshold.red;
  /* A '%' suffix scales percentages into the quantum range. */
  if ((flags & PercentValue) != 0)
    {
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.opacity*=(MagickRealType) (QuantumRange/100.0);
      threshold.index*=(MagickRealType) (QuantumRange/100.0);
    }
  /* A non-gray threshold cannot apply to a gray image; promote to sRGB. */
  if ((IsMagickGray(&threshold) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    White threshold image: every selected channel strictly above its
    threshold is forced to QuantumRange; values at or below are untouched.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (((channel & RedChannel) != 0) &&
          ((MagickRealType) GetPixelRed(q) > threshold.red))
        SetPixelRed(q,QuantumRange);
      if (((channel & GreenChannel) != 0) &&
          ((MagickRealType) GetPixelGreen(q) > threshold.green))
        SetPixelGreen(q,QuantumRange);
      if (((channel & BlueChannel) != 0) &&
          ((MagickRealType) GetPixelBlue(q) > threshold.blue))
        SetPixelBlue(q,QuantumRange);
      if (((channel & OpacityChannel) != 0) &&
          ((MagickRealType) GetPixelOpacity(q) > threshold.opacity))
        SetPixelOpacity(q,QuantumRange);
      /* The index (black) channel only exists for CMYK images. */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          ((MagickRealType) GetPixelIndex(indexes+x)) > threshold.index)
        SetPixelIndex(indexes+x,QuantumRange);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress++ is shared across the parallel rows; serialize it. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_WhiteThresholdImageChannel)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
ccv_bbf.c | #include "ccv.h"
#include "ccv_internal.h"
#include <sys/time.h>
#ifdef HAVE_GSL
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#endif
#ifdef USE_OPENMP
#include <omp.h>
#endif
/* Default detection parameters for ccv_bbf_detect_objects(); a 24x24 base
 * window. NOTE(review): interval/min_neighbors/accurate semantics are
 * defined by the detector implementation elsewhere -- confirm there. */
const ccv_bbf_param_t ccv_bbf_default_params = {
	.interval = 5,
	.min_neighbors = 2,
	.accurate = 1,
	.flags = 0,
	.size = {
		24,
		24,
	},
};
/* Round a row width up to the next multiple of 4 (matrix row padding). */
#define _ccv_width_padding(x) (((x) + 3) & -4)

/* Evaluate one BBF feature over a 3-level image pyramid.
 * u8[0..2] point to the pixel data of each pyramid level, step[0..2] are the
 * corresponding row strides. Returns 1 iff EVERY positive point is brighter
 * than EVERY negative point, i.e. min(P) > max(N); 0 otherwise.
 * The loop tracks the running minimum of P and maximum of N so it can bail
 * out as soon as the invariant is violated. Unused point slots carry z == -1
 * and are skipped. */
static inline int _ccv_run_bbf_feature(ccv_bbf_feature_t* feature, int* step, unsigned char** u8)
{
#define pf_at(i) (*(u8[feature->pz[i]] + feature->px[i] + feature->py[i] * step[feature->pz[i]]))
#define nf_at(i) (*(u8[feature->nz[i]] + feature->nx[i] + feature->ny[i] * step[feature->nz[i]]))
	unsigned char pmin = pf_at(0), nmax = nf_at(0);
	/* check if every point in P > every point in N, and take a shortcut */
	if (pmin <= nmax)
		return 0;
	int i;
	for (i = 1; i < feature->size; i++)
	{
		if (feature->pz[i] >= 0)
		{
			int p = pf_at(i);
			if (p < pmin)
			{
				if (p <= nmax)
					return 0;
				pmin = p;
			}
		}
		if (feature->nz[i] >= 0)
		{
			int n = nf_at(i);
			if (n > nmax)
			{
				if (pmin <= n)
					return 0;
				nmax = n;
			}
		}
	}
#undef pf_at
#undef nf_at
	return 1;
}
/* Load one stage classifier from the text format produced by
 * _ccv_write_bbf_stage_classifier. Floats are stored as the decimal value of
 * their bit pattern and recovered through an int/float union so the
 * round-trip is bit-exact. Returns 0 on success, -1 if the file cannot be
 * opened. NOTE(review): `stat` accumulates fscanf results but is never
 * checked, so a truncated file still returns 0 with partial data. */
static int _ccv_read_bbf_stage_classifier(const char* file, ccv_bbf_stage_classifier_t* classifier)
{
	FILE* r = fopen(file, "r");
	if (r == 0) return -1;
	int stat = 0;
	stat |= fscanf(r, "%d", &classifier->count);
	union { float fl; int i; } fli;
	stat |= fscanf(r, "%d", &fli.i);
	classifier->threshold = fli.fl;
	/* each feature is followed by a pair of alpha weights (vote values) */
	classifier->feature = (ccv_bbf_feature_t*)ccmalloc(classifier->count * sizeof(ccv_bbf_feature_t));
	classifier->alpha = (float*)ccmalloc(classifier->count * 2 * sizeof(float));
	int i, j;
	for (i = 0; i < classifier->count; i++)
	{
		stat |= fscanf(r, "%d", &classifier->feature[i].size);
		for (j = 0; j < classifier->feature[i].size; j++)
		{
			stat |= fscanf(r, "%d %d %d", &classifier->feature[i].px[j], &classifier->feature[i].py[j], &classifier->feature[i].pz[j]);
			stat |= fscanf(r, "%d %d %d", &classifier->feature[i].nx[j], &classifier->feature[i].ny[j], &classifier->feature[i].nz[j]);
		}
		union { float fl; int i; } flia, flib;
		stat |= fscanf(r, "%d %d", &flia.i, &flib.i);
		classifier->alpha[i * 2] = flia.fl;
		classifier->alpha[i * 2 + 1] = flib.fl;
	}
	fclose(r);
	return 0;
}
#ifdef HAVE_GSL
/* Wall-clock timestamp in microseconds, used only to measure intervals via
 * unsigned subtraction of two samples -- the seconds term deliberately wraps
 * in the unsigned result, and the wrapped difference is still correct for
 * intervals shorter than ~71 minutes.
 * Fix: `()` in a definition is an old-style (non-prototype) declarator;
 * `(void)` declares a proper zero-argument prototype. */
static unsigned int _ccv_bbf_time_measure(void)
{
	struct timeval tv;
	gettimeofday(&tv, 0);
	return (unsigned int)(tv.tv_sec * 1000000 + tv.tv_usec);
}
/* Instantiate an ascending qsort over floats (aux parameter unused). */
#define less_than(a, b, aux) ((a) < (b))
CCV_IMPLEMENT_QSORT(_ccv_sort_32f, float, less_than)
#undef less_than
/* Evaluate the stage classifier's summed alpha response for every positive
 * and negative sample, writing results into peval / neval respectively.
 * Each sample is a packed 3-level pyramid laid out contiguously. */
static void _ccv_bbf_eval_data(ccv_bbf_stage_classifier_t* classifier, unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_size_t size, float* peval, float* neval)
{
	int strides[] = { _ccv_width_padding(size.width),
			  _ccv_width_padding(size.width >> 1),
			  _ccv_width_padding(size.width >> 2) };
	int off1 = strides[0] * size.height;
	int off2 = off1 + strides[1] * (size.height >> 1);
	/* drive both sample sets through the identical evaluation loop */
	unsigned char** sets[2] = { posdata, negdata };
	int nums[2] = { posnum, negnum };
	float* evals[2] = { peval, neval };
	int s, t, c;
	for (s = 0; s < 2; s++)
		for (t = 0; t < nums[s]; t++)
		{
			unsigned char* base = sets[s][t];
			unsigned char* pyramid[] = { base, base + off1, base + off2 };
			float response = 0;
			float* weight = classifier->alpha;
			ccv_bbf_feature_t* ft = classifier->feature;
			for (c = 0; c < classifier->count; ++c, weight += 2, ++ft)
				response += weight[_ccv_run_bbf_feature(ft, strides, pyramid)];
			evals[s][t] = response;
		}
}
/* Drop positive samples that any existing cascade stage already rejects,
 * freeing their buffers and compacting posdata in place. Returns the number
 * of surviving samples (<= posnum). */
static int _ccv_prune_positive_data(ccv_bbf_classifier_cascade_t* cascade, unsigned char** posdata, int posnum, ccv_size_t size)
{
	float* peval = (float*)ccmalloc(posnum * sizeof(float));
	int i, j, k, rpos = posnum;
	for (i = 0; i < cascade->count; i++)
	{
		/* negatives not needed here: evaluate positives only */
		_ccv_bbf_eval_data(cascade->stage_classifier + i, posdata, rpos, 0, 0, size, peval, 0);
		k = 0;
		for (j = 0; j < rpos; j++)
			if (peval[j] >= cascade->stage_classifier[i].threshold)
			{
				/* survivor: compact toward the front */
				posdata[k] = posdata[j];
				++k;
			} else {
				ccfree(posdata[j]);
			}
		rpos = k;
	}
	ccfree(peval);
	return rpos;
}
/* Mine hard-negative training samples: run the current cascade over the
 * background images, randomly pick detections (false positives), and keep
 * those that still pass every stage. Each kept sample is stored as a packed
 * 3-level pyramid in negdata[]. Returns the number of negatives collected
 * (may be < negnum if the backgrounds are exhausted).
 * Fix: the original asserted on image->type BEFORE checking image == 0, so a
 * corrupt/unreadable background file dereferenced a null pointer; the null
 * check now comes first. */
static int _ccv_prepare_background_data(ccv_bbf_classifier_cascade_t* cascade, char** bgfiles, int bgnum, unsigned char** negdata, int negnum)
{
	int t, i, j, k, q;
	int negperbg;
	int negtotal = 0;
	int steps[] = { _ccv_width_padding(cascade->size.width),
			_ccv_width_padding(cascade->size.width >> 1),
			_ccv_width_padding(cascade->size.width >> 2) };
	int isizs0 = steps[0] * cascade->size.height;
	int isizs1 = steps[1] * (cascade->size.height >> 1);
	int isizs2 = steps[2] * (cascade->size.height >> 2);
	int* idcheck = (int*)ccmalloc(negnum * sizeof(int));
	gsl_rng_env_setup();
	gsl_rng* rng = gsl_rng_alloc(gsl_rng_default);
	/* NOTE(review): seeds the RNG from a heap address -- effectively an
	 * arbitrary per-run seed; confirm reproducibility is not required */
	gsl_rng_set(rng, (unsigned long int)idcheck);
	ccv_size_t imgsz = cascade->size;
	int rneg = negtotal;
	for (t = 0; negtotal < negnum; t++)
	{
		PRINT(CCV_CLI_INFO, "preparing negative data ...  0%%");
		for (i = 0; i < bgnum; i++)
		{
			/* early rounds spread the quota across remaining files */
			negperbg = (t < 2) ? (negnum - negtotal) / (bgnum - i) + 1 : negnum - negtotal;
			ccv_dense_matrix_t* image = 0;
			ccv_read(bgfiles[i], &image, CCV_IO_GRAY | CCV_IO_ANY_FILE);
			if (image == 0)
			{
				PRINT(CCV_CLI_ERROR, "\n%s file corrupted\n", bgfiles[i]);
				continue;
			}
			assert((image->type & CCV_C1) && (image->type & CCV_8U));
			/* flip the backgrounds on later rounds to harvest more variety */
			if (t % 2 != 0)
				ccv_flip(image, 0, 0, CCV_FLIP_X);
			if (t % 4 >= 2)
				ccv_flip(image, 0, 0, CCV_FLIP_Y);
			ccv_bbf_param_t params = { .interval = 3, .min_neighbors = 0, .accurate = 1, .flags = 0, .size = cascade->size };
			ccv_array_t* detected = ccv_bbf_detect_objects(image, &cascade, 1, params);
			memset(idcheck, 0, ccv_min(detected->rnum, negperbg) * sizeof(int));
			for (j = 0; j < ccv_min(detected->rnum, negperbg); j++)
			{
				int r = gsl_rng_uniform_int(rng, detected->rnum);
				int flag = 1;
				ccv_rect_t* rect = (ccv_rect_t*)ccv_array_get(detected, r);
				/* re-draw until the pick is unused and fully inside the image */
				while (flag) {
					flag = 0;
					for (k = 0; k < j; k++)
						if (r == idcheck[k])
						{
							flag = 1;
							r = gsl_rng_uniform_int(rng, detected->rnum);
							break;
						}
					rect = (ccv_rect_t*)ccv_array_get(detected, r);
					if ((rect->x < 0) || (rect->y < 0) || (rect->width + rect->x > image->cols) || (rect->height + rect->y > image->rows))
					{
						flag = 1;
						r = gsl_rng_uniform_int(rng, detected->rnum);
					}
				}
				idcheck[j] = r;
				/* build the packed 3-level pyramid for this crop */
				ccv_dense_matrix_t* temp = 0;
				ccv_dense_matrix_t* imgs0 = 0;
				ccv_dense_matrix_t* imgs1 = 0;
				ccv_dense_matrix_t* imgs2 = 0;
				ccv_slice(image, (ccv_matrix_t**)&temp, 0, rect->y, rect->x, rect->height, rect->width);
				ccv_resample(temp, &imgs0, 0, imgsz.height, imgsz.width, CCV_INTER_AREA);
				assert(imgs0->step == steps[0]);
				ccv_matrix_free(temp);
				ccv_sample_down(imgs0, &imgs1, 0, 0, 0);
				assert(imgs1->step == steps[1]);
				ccv_sample_down(imgs1, &imgs2, 0, 0, 0);
				assert(imgs2->step == steps[2]);
				negdata[negtotal] = (unsigned char*)ccmalloc(isizs0 + isizs1 + isizs2);
				unsigned char* u8s0 = negdata[negtotal];
				unsigned char* u8s1 = negdata[negtotal] + isizs0;
				unsigned char* u8s2 = negdata[negtotal] + isizs0 + isizs1;
				unsigned char* u8[] = { u8s0, u8s1, u8s2 };
				memcpy(u8s0, imgs0->data.u8, imgs0->rows * imgs0->step);
				ccv_matrix_free(imgs0);
				memcpy(u8s1, imgs1->data.u8, imgs1->rows * imgs1->step);
				ccv_matrix_free(imgs1);
				memcpy(u8s2, imgs2->data.u8, imgs2->rows * imgs2->step);
				ccv_matrix_free(imgs2);
				/* keep only crops that still pass every existing stage */
				flag = 1;
				ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier;
				for (k = 0; k < cascade->count; ++k, ++classifier)
				{
					float sum = 0;
					float* alpha = classifier->alpha;
					ccv_bbf_feature_t* feature = classifier->feature;
					for (q = 0; q < classifier->count; ++q, alpha += 2, ++feature)
						sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)];
					if (sum < classifier->threshold)
					{
						flag = 0;
						break;
					}
				}
				if (!flag)
					ccfree(negdata[negtotal]);
				else {
					++negtotal;
					if (negtotal >= negnum)
						break;
				}
			}
			ccv_array_free(detected);
			ccv_matrix_free(image);
			ccv_drain_cache();
			PRINT(CCV_CLI_INFO, "\rpreparing negative data ... %2d%%", 100 * negtotal / negnum);
			fflush(0);
			if (negtotal >= negnum)
				break;
		}
		/* stop when a full round over all backgrounds yields nothing new */
		if (rneg == negtotal)
			break;
		rneg = negtotal;
		PRINT(CCV_CLI_INFO, "\nentering additional round %d\n", t + 1);
	}
	gsl_rng_free(rng);
	ccfree(idcheck);
	ccv_drain_cache();
	PRINT(CCV_CLI_INFO, "\n");
	return negtotal;
}
/* Convert each positive training image into the packed 3-level pyramid
 * layout used by the feature evaluator: the original image followed by two
 * successive 2x downsamples, all in one ccmalloc'd buffer per sample. */
static void _ccv_prepare_positive_data(ccv_dense_matrix_t** posimg, unsigned char** posdata, ccv_size_t size, int posnum)
{
	PRINT(CCV_CLI_INFO, "preparing positive data ...  0%%");
	int i;
	for (i = 0; i < posnum; i++)
	{
		ccv_dense_matrix_t* imgs0 = posimg[i];
		ccv_dense_matrix_t* imgs1 = 0;
		ccv_dense_matrix_t* imgs2 = 0;
		/* inputs must already be 8-bit single-channel at the base window size */
		assert((imgs0->type & CCV_C1) && (imgs0->type & CCV_8U) && imgs0->rows == size.height && imgs0->cols == size.width);
		ccv_sample_down(imgs0, &imgs1, 0, 0, 0);
		ccv_sample_down(imgs1, &imgs2, 0, 0, 0);
		int isizs0 = imgs0->rows * imgs0->step;
		int isizs1 = imgs1->rows * imgs1->step;
		int isizs2 = imgs2->rows * imgs2->step;
		posdata[i] = (unsigned char*)ccmalloc(isizs0 + isizs1 + isizs2);
		memcpy(posdata[i], imgs0->data.u8, isizs0);
		memcpy(posdata[i] + isizs0, imgs1->data.u8, isizs1);
		memcpy(posdata[i] + isizs0 + isizs1, imgs2->data.u8, isizs2);
		PRINT(CCV_CLI_INFO, "\rpreparing positive data ... %2d%%", 100 * (i + 1) / posnum);
		fflush(0);
		ccv_matrix_free(imgs1);
		ccv_matrix_free(imgs2);
	}
	ccv_drain_cache();
	PRINT(CCV_CLI_INFO, "\n");
}
/* One candidate feature ("gene") in the feature-search population. */
typedef struct {
	double fitness;          /* selection score, see _ccv_bbf_genetic_fitness */
	int pk, nk;              /* number of positive / negative points in use */
	int age;                 /* generations since this gene last changed */
	double error;            /* weighted training error of the feature */
	ccv_bbf_feature_t feature;
} ccv_bbf_gene_t;
/* Refresh a gene's selection score: low error is rewarded, stale genes decay
 * exponentially with age, and richer features (more points) earn a mild
 * 1.015^(pk+nk) bonus. */
static inline void _ccv_bbf_genetic_fitness(ccv_bbf_gene_t* gene)
{
	double accuracy = 1 - gene->error;
	double freshness = exp(-0.01 * gene->age);
	double richness = exp((gene->pk + gene->nk) * log(1.015));
	gene->fitness = accuracy * freshness * richness;
}
/* Return 1 when point (x, y) at pyramid level z already appears among the
 * gene's positive or negative points, 0 otherwise. */
static inline int _ccv_bbf_exist_gene_feature(ccv_bbf_gene_t* gene, int x, int y, int z)
{
	int idx;
	for (idx = 0; idx < gene->pk; idx++)
		if (gene->feature.pz[idx] == z && gene->feature.px[idx] == x && gene->feature.py[idx] == y)
			return 1;
	for (idx = 0; idx < gene->nk; idx++)
		if (gene->feature.nz[idx] == z && gene->feature.nx[idx] == x && gene->feature.ny[idx] == y)
			return 1;
	return 0;
}
/* Initialize a gene with random point counts and random, mutually distinct
 * point locations. rows[0..2] / cols[0..2] give the dimensions of each
 * pyramid level; rejection sampling keeps every point unique. */
static inline void _ccv_bbf_randomize_gene(gsl_rng* rng, ccv_bbf_gene_t* gene, int* rows, int* cols)
{
	int i;
	do {
		gene->pk = gsl_rng_uniform_int(rng, CCV_BBF_POINT_MAX - 1) + 1;
		gene->nk = gsl_rng_uniform_int(rng, CCV_BBF_POINT_MAX - 1) + 1;
	} while (gene->pk + gene->nk < CCV_BBF_POINT_MIN); /* a hard restriction of at least 3 points have to be examed */
	gene->feature.size = ccv_max(gene->pk, gene->nk);
	gene->age = 0;
	/* z == -1 marks an unused point slot */
	for (i = 0; i < CCV_BBF_POINT_MAX; i++)
	{
		gene->feature.pz[i] = -1;
		gene->feature.nz[i] = -1;
	}
	int x, y, z;
	for (i = 0; i < gene->pk; i++)
	{
		/* re-draw until the point is not already taken */
		do {
			z = gsl_rng_uniform_int(rng, 3);
			x = gsl_rng_uniform_int(rng, cols[z]);
			y = gsl_rng_uniform_int(rng, rows[z]);
		} while (_ccv_bbf_exist_gene_feature(gene, x, y, z));
		gene->feature.pz[i] = z;
		gene->feature.px[i] = x;
		gene->feature.py[i] = y;
	}
	for (i = 0; i < gene->nk; i++)
	{
		do {
			z = gsl_rng_uniform_int(rng, 3);
			x = gsl_rng_uniform_int(rng, cols[z]);
			y = gsl_rng_uniform_int(rng, rows[z]);
		} while ( _ccv_bbf_exist_gene_feature(gene, x, y, z));
		gene->feature.nz[i] = z;
		gene->feature.nx[i] = x;
		gene->feature.ny[i] = y;
	}
}
/* Weighted classification error of one feature over the training data:
 * a positive that fails to fire contributes its weight pw[i], a negative
 * that fires contributes nw[i]. */
static inline double _ccv_bbf_error_rate(ccv_bbf_feature_t* feature, unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_size_t size, double* pw, double* nw)
{
	int strides[] = { _ccv_width_padding(size.width),
			  _ccv_width_padding(size.width >> 1),
			  _ccv_width_padding(size.width >> 2) };
	int off1 = strides[0] * size.height;
	int off2 = off1 + strides[1] * (size.height >> 1);
	double error = 0;
	int t;
	for (t = 0; t < posnum; t++)
	{
		unsigned char* pyramid[] = { posdata[t], posdata[t] + off1, posdata[t] + off2 };
		/* missed positive */
		if (_ccv_run_bbf_feature(feature, strides, pyramid) == 0)
			error += pw[t];
	}
	for (t = 0; t < negnum; t++)
	{
		unsigned char* pyramid[] = { negdata[t], negdata[t] + off1, negdata[t] + off2 };
		/* false alarm */
		if (_ccv_run_bbf_feature(feature, strides, pyramid) != 0)
			error += nw[t];
	}
	return error;
}
/* Sort genes by DESCENDING fitness (note the >=), best gene first. */
#define less_than(fit1, fit2, aux) ((fit1).fitness >= (fit2).fitness)
static CCV_IMPLEMENT_QSORT(_ccv_bbf_genetic_qsort, ccv_bbf_gene_t, less_than)
#undef less_than
/* Search for a low-error feature with a genetic algorithm. The population of
 * pnum = ftnum*100 genes evolves each generation into: the ftnum fittest
 * survivors, mnum mutants of survivors, hnum hybrids of survivor pairs, and
 * rnum fresh random genes. Terminates after 40 generations without a new
 * best and returns the best feature found.
 * NOTE(review): `best` is only assigned when some gene's error drops below
 * the initial best_err of 1; with normalized sample weights that holds in
 * practice, but `best` is technically uninitialized otherwise -- confirm. */
static ccv_bbf_feature_t _ccv_bbf_genetic_optimize(unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, int ftnum, ccv_size_t size, double* pw, double* nw)
{
	ccv_bbf_feature_t best;
	/* seed (random method): derive the RNG seed from the first weights */
	gsl_rng_env_setup();
	gsl_rng* rng = gsl_rng_alloc(gsl_rng_default);
	union { unsigned long int li; double db; } dbli;
	dbli.db = pw[0] + nw[0];
	gsl_rng_set(rng, dbli.li);
	int i, j;
	int pnum = ftnum * 100;
	assert(pnum > 0);
	ccv_bbf_gene_t* gene = (ccv_bbf_gene_t*)ccmalloc(pnum * sizeof(ccv_bbf_gene_t));
	int rows[] = { size.height, size.height >> 1, size.height >> 2 };
	int cols[] = { size.width, size.width >> 1, size.width >> 2 };
	for (i = 0; i < pnum; i++)
		_ccv_bbf_randomize_gene(rng, &gene[i], rows, cols);
	unsigned int timer = _ccv_bbf_time_measure();
#ifdef USE_OPENMP
#pragma omp parallel for private(i) schedule(dynamic)
#endif
	for (i = 0; i < pnum; i++)
		gene[i].error = _ccv_bbf_error_rate(&gene[i].feature, posdata, posnum, negdata, negnum, size, pw, nw);
	timer = _ccv_bbf_time_measure() - timer;
	for (i = 0; i < pnum; i++)
		_ccv_bbf_genetic_fitness(&gene[i]);
	double best_err = 1;
	int rnum = ftnum * 39; /* number of randomize */
	int mnum = ftnum * 40; /* number of mutation */
	int hnum = ftnum * 20; /* number of hybrid */
	/* iteration stop crit : best no change in 40 iterations */
	int it = 0, t;
	for (t = 0 ; it < 40; ++it, ++t)
	{
		/* find the current minimum-error gene */
		int min_id = 0;
		double min_err = gene[0].error;
		for (i = 1; i < pnum; i++)
			if (gene[i].error < min_err)
			{
				min_id = i;
				min_err = gene[i].error;
			}
		min_err = gene[min_id].error = _ccv_bbf_error_rate(&gene[min_id].feature, posdata, posnum, negdata, negnum, size, pw, nw);
		if (min_err < best_err)
		{
			best_err = min_err;
			memcpy(&best, &gene[min_id].feature, sizeof(best));
			PRINT(CCV_CLI_INFO, "best bbf feature with error %f\n|-size: %d\n|-positive point: ", best_err, best.size);
			for (i = 0; i < best.size; i++)
				PRINT(CCV_CLI_INFO, "(%d %d %d), ", best.px[i], best.py[i], best.pz[i]);
			PRINT(CCV_CLI_INFO, "\n|-negative point: ");
			for (i = 0; i < best.size; i++)
				PRINT(CCV_CLI_INFO, "(%d %d %d), ", best.nx[i], best.ny[i], best.nz[i]);
			PRINT(CCV_CLI_INFO, "\n");
			it = 0; /* progress made: reset the stagnation counter */
		}
		PRINT(CCV_CLI_INFO, "minimum error achieved in round %d(%d) : %f with %d ms\n", t, it, min_err, timer / 1000);
		/* descending fitness sort; the top ftnum genes survive unchanged */
		_ccv_bbf_genetic_qsort(gene, pnum, 0);
		for (i = 0; i < ftnum; i++)
			++gene[i].age;
		/* mutants: copy a random survivor, then repeatedly mutate it */
		for (i = ftnum; i < ftnum + mnum; i++)
		{
			int parent = gsl_rng_uniform_int(rng, ftnum);
			memcpy(gene + i, gene + parent, sizeof(ccv_bbf_gene_t));
			/* three mutation strategy : 1. add, 2. remove, 3. refine */
			int pnm, pn = gsl_rng_uniform_int(rng, 2);
			/* pn selects the positive (0) or negative (1) point set */
			int* pnk[] = { &gene[i].pk, &gene[i].nk };
			int* pnx[] = { gene[i].feature.px, gene[i].feature.nx };
			int* pny[] = { gene[i].feature.py, gene[i].feature.ny };
			int* pnz[] = { gene[i].feature.pz, gene[i].feature.nz };
			int x, y, z;
			int victim, decay = 1;
			do {
				switch (gsl_rng_uniform_int(rng, 3))
				{
					case 0: /* add */
						if (gene[i].pk == CCV_BBF_POINT_MAX && gene[i].nk == CCV_BBF_POINT_MAX)
							break;
						while (*pnk[pn] + 1 > CCV_BBF_POINT_MAX)
							pn = gsl_rng_uniform_int(rng, 2);
						do {
							z = gsl_rng_uniform_int(rng, 3);
							x = gsl_rng_uniform_int(rng, cols[z]);
							y = gsl_rng_uniform_int(rng, rows[z]);
						} while (_ccv_bbf_exist_gene_feature(&gene[i], x, y, z));
						pnz[pn][*pnk[pn]] = z;
						pnx[pn][*pnk[pn]] = x;
						pny[pn][*pnk[pn]] = y;
						++(*pnk[pn]);
						gene[i].feature.size = ccv_max(gene[i].pk, gene[i].nk);
						decay = gene[i].age = 0;
						break;
					case 1: /* remove */
						if (gene[i].pk + gene[i].nk <= CCV_BBF_POINT_MIN) /* at least 3 points have to be examed */
							break;
						while (*pnk[pn] - 1 <= 0) // || *pnk[pn] + *pnk[!pn] - 1 < CCV_BBF_POINT_MIN)
							pn = gsl_rng_uniform_int(rng, 2);
						victim = gsl_rng_uniform_int(rng, *pnk[pn]);
						/* shift the remaining points down over the victim */
						for (j = victim; j < *pnk[pn] - 1; j++)
						{
							pnz[pn][j] = pnz[pn][j + 1];
							pnx[pn][j] = pnx[pn][j + 1];
							pny[pn][j] = pny[pn][j + 1];
						}
						pnz[pn][*pnk[pn] - 1] = -1;
						--(*pnk[pn]);
						gene[i].feature.size = ccv_max(gene[i].pk, gene[i].nk);
						decay = gene[i].age = 0;
						break;
					case 2: /* refine */
						pnm = gsl_rng_uniform_int(rng, *pnk[pn]);
						do {
							z = gsl_rng_uniform_int(rng, 3);
							x = gsl_rng_uniform_int(rng, cols[z]);
							y = gsl_rng_uniform_int(rng, rows[z]);
						} while (_ccv_bbf_exist_gene_feature(&gene[i], x, y, z));
						pnz[pn][pnm] = z;
						pnx[pn][pnm] = x;
						pny[pn][pnm] = y;
						decay = gene[i].age = 0;
						break;
				}
			} while (decay);
		}
		for (i = ftnum + mnum; i < ftnum + mnum + hnum; i++)
		{
			/* hybrid strategy: taking positive points from dad, negative points from mum */
			int dad, mum;
			do {
				dad = gsl_rng_uniform_int(rng, ftnum);
				mum = gsl_rng_uniform_int(rng, ftnum);
			} while (dad == mum || gene[dad].pk + gene[mum].nk < CCV_BBF_POINT_MIN); /* at least 3 points have to be examed */
			for (j = 0; j < CCV_BBF_POINT_MAX; j++)
			{
				gene[i].feature.pz[j] = -1;
				gene[i].feature.nz[j] = -1;
			}
			gene[i].pk = gene[dad].pk;
			for (j = 0; j < gene[i].pk; j++)
			{
				gene[i].feature.pz[j] = gene[dad].feature.pz[j];
				gene[i].feature.px[j] = gene[dad].feature.px[j];
				gene[i].feature.py[j] = gene[dad].feature.py[j];
			}
			gene[i].nk = gene[mum].nk;
			for (j = 0; j < gene[i].nk; j++)
			{
				gene[i].feature.nz[j] = gene[mum].feature.nz[j];
				gene[i].feature.nx[j] = gene[mum].feature.nx[j];
				gene[i].feature.ny[j] = gene[mum].feature.ny[j];
			}
			gene[i].feature.size = ccv_max(gene[i].pk, gene[i].nk);
			gene[i].age = 0;
		}
		/* the rest of the population is replaced with fresh random genes */
		for (i = ftnum + mnum + hnum; i < ftnum + mnum + hnum + rnum; i++)
			_ccv_bbf_randomize_gene(rng, &gene[i], rows, cols);
		timer = _ccv_bbf_time_measure();
#ifdef USE_OPENMP
#pragma omp parallel for private(i) schedule(dynamic)
#endif
		for (i = 0; i < pnum; i++)
			gene[i].error = _ccv_bbf_error_rate(&gene[i].feature, posdata, posnum, negdata, negnum, size, pw, nw);
		timer = _ccv_bbf_time_measure() - timer;
		for (i = 0; i < pnum; i++)
			_ccv_bbf_genetic_fitness(&gene[i]);
	}
	ccfree(gene);
	gsl_rng_free(rng);
	return best;
}
/* Sort genes by ascending training error, lowest-error gene first. */
#define less_than(fit1, fit2, aux) ((fit1).error < (fit2).error)
static CCV_IMPLEMENT_QSORT(_ccv_bbf_best_qsort, ccv_bbf_gene_t, less_than)
#undef less_than
/* Evaluate every candidate gene's error, then return the lowest-error gene
 * that uses at least point_min points in total. Sorts `gene` in place.
 * NOTE(review): if NO gene satisfies point_min, gene[0] is returned as a
 * silent fallback -- confirm callers guarantee at least one valid gene. */
static ccv_bbf_gene_t _ccv_bbf_best_gene(ccv_bbf_gene_t* gene, int pnum, int point_min, unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_size_t size, double* pw, double* nw)
{
	int i;
	unsigned int timer = _ccv_bbf_time_measure();
#ifdef USE_OPENMP
#pragma omp parallel for private(i) schedule(dynamic)
#endif
	for (i = 0; i < pnum; i++)
		gene[i].error = _ccv_bbf_error_rate(&gene[i].feature, posdata, posnum, negdata, negnum, size, pw, nw);
	timer = _ccv_bbf_time_measure() - timer;
	_ccv_bbf_best_qsort(gene, pnum, 0);
	int min_id = 0;
	double min_err = gene[0].error;
	/* after the ascending sort, the first qualifying gene is the best one */
	for (i = 0; i < pnum; i++)
		if (gene[i].nk + gene[i].pk >= point_min)
		{
			min_id = i;
			min_err = gene[i].error;
			break;
		}
	PRINT(CCV_CLI_INFO, "local best bbf feature with error %f\n|-size: %d\n|-positive point: ", min_err, gene[min_id].feature.size);
	for (i = 0; i < gene[min_id].feature.size; i++)
		PRINT(CCV_CLI_INFO, "(%d %d %d), ", gene[min_id].feature.px[i], gene[min_id].feature.py[i], gene[min_id].feature.pz[i]);
	PRINT(CCV_CLI_INFO, "\n|-negative point: ");
	for (i = 0; i < gene[min_id].feature.size; i++)
		PRINT(CCV_CLI_INFO, "(%d %d %d), ", gene[min_id].feature.nx[i], gene[min_id].feature.ny[i], gene[min_id].feature.nz[i]);
	PRINT(CCV_CLI_INFO, "\nthe computation takes %d ms\n", timer / 1000);
	return gene[min_id];
}
/* Refine (or bootstrap) a feature by exhaustive local search. Starting from
 * best_feature (or from a bootstrapped two-point feature when best_feature
 * is 0), each round enumerates every add / remove / move of a single point,
 * evaluates all candidates with _ccv_bbf_best_gene, and keeps the best one.
 * Iterates until no candidate improves the error by more than 1e-10. */
static ccv_bbf_feature_t _ccv_bbf_convex_optimize(unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_bbf_feature_t* best_feature, ccv_size_t size, double* pw, double* nw)
{
	ccv_bbf_gene_t best_gene;
	/* seed (random method): derive the RNG seed from the first weights */
	gsl_rng_env_setup();
	gsl_rng* rng = gsl_rng_alloc(gsl_rng_default);
	union { unsigned long int li; double db; } dbli;
	dbli.db = pw[0] + nw[0];
	gsl_rng_set(rng, dbli.li);
	int i, j, k, q, p, g, t;
	int rows[] = { size.height, size.height >> 1, size.height >> 2 };
	int cols[] = { size.width, size.width >> 1, size.width >> 2 };
	/* pnum = total pixels over all 3 pyramid levels; the gene buffer is
	 * sized for the worst-case number of candidates per round */
	int pnum = rows[0] * cols[0] + rows[1] * cols[1] + rows[2] * cols[2];
	ccv_bbf_gene_t* gene = (ccv_bbf_gene_t*)ccmalloc((pnum * (CCV_BBF_POINT_MAX * 2 + 1) * 2 + CCV_BBF_POINT_MAX * 2 + 1) * sizeof(ccv_bbf_gene_t));
	if (best_feature == 0)
	{
		/* bootstrapping the best feature, start from two pixels, one for positive, one for negative
		 * the bootstrapping process go like this: first, it will assign a random pixel as positive
		 * and enumerate every possible pixel as negative, and pick the best one. Then, enumerate every
		 * possible pixel as positive, and pick the best one, until it converges */
		memset(&best_gene, 0, sizeof(ccv_bbf_gene_t));
		for (i = 0; i < CCV_BBF_POINT_MAX; i++)
			best_gene.feature.pz[i] = best_gene.feature.nz[i] = -1;
		best_gene.pk = 1;
		best_gene.nk = 0;
		best_gene.feature.size = 1;
		best_gene.feature.pz[0] = gsl_rng_uniform_int(rng, 3);
		best_gene.feature.px[0] = gsl_rng_uniform_int(rng, cols[best_gene.feature.pz[0]]);
		best_gene.feature.py[0] = gsl_rng_uniform_int(rng, rows[best_gene.feature.pz[0]]);
		for (t = 0; ; ++t)
		{
			g = 0;
			/* alternate: even rounds enumerate the negative point, odd
			 * rounds enumerate the positive point */
			if (t % 2 == 0)
			{
				for (i = 0; i < 3; i++)
					for (j = 0; j < cols[i]; j++)
						for (k = 0; k < rows[i]; k++)
							if (i != best_gene.feature.pz[0] || j != best_gene.feature.px[0] || k != best_gene.feature.py[0])
							{
								gene[g] = best_gene;
								gene[g].pk = gene[g].nk = 1;
								gene[g].feature.nz[0] = i;
								gene[g].feature.nx[0] = j;
								gene[g].feature.ny[0] = k;
								g++;
							}
			} else {
				for (i = 0; i < 3; i++)
					for (j = 0; j < cols[i]; j++)
						for (k = 0; k < rows[i]; k++)
							if (i != best_gene.feature.nz[0] || j != best_gene.feature.nx[0] || k != best_gene.feature.ny[0])
							{
								gene[g] = best_gene;
								gene[g].pk = gene[g].nk = 1;
								gene[g].feature.pz[0] = i;
								gene[g].feature.px[0] = j;
								gene[g].feature.py[0] = k;
								g++;
							}
			}
			PRINT(CCV_CLI_INFO, "bootstrapping round : %d\n", t);
			ccv_bbf_gene_t local_gene = _ccv_bbf_best_gene(gene, g, 2, posdata, posnum, negdata, negnum, size, pw, nw);
			if (local_gene.error >= best_gene.error - 1e-10)
				break;
			best_gene = local_gene;
		}
	} else {
		/* resume from the supplied feature; recover pk/nk by scanning for
		 * the first unused (-1) slot in each point list */
		best_gene.feature = *best_feature;
		best_gene.pk = best_gene.nk = best_gene.feature.size;
		for (i = 0; i < CCV_BBF_POINT_MAX; i++)
			if (best_feature->pz[i] == -1)
			{
				best_gene.pk = i;
				break;
			}
		for (i = 0; i < CCV_BBF_POINT_MAX; i++)
			if (best_feature->nz[i] == -1)
			{
				best_gene.nk = i;
				break;
			}
	}
	/* after bootstrapping, the float search technique will do the following permutations:
	 * a). add a new point to positive or negative
	 * b). remove a point from positive or negative
	 * c). move an existing point in positive or negative to another position
	 * the three rules applied exhaustively, no heuristic used. */
	for (t = 0; ; ++t)
	{
		g = 0;
		for (i = 0; i < 3; i++)
			for (j = 0; j < cols[i]; j++)
				for (k = 0; k < rows[i]; k++)
					if (!_ccv_bbf_exist_gene_feature(&best_gene, j, k, i))
					{
						/* add positive point */
						if (best_gene.pk < CCV_BBF_POINT_MAX - 1)
						{
							gene[g] = best_gene;
							gene[g].feature.pz[gene[g].pk] = i;
							gene[g].feature.px[gene[g].pk] = j;
							gene[g].feature.py[gene[g].pk] = k;
							gene[g].pk++;
							gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
							g++;
						}
						/* add negative point */
						if (best_gene.nk < CCV_BBF_POINT_MAX - 1)
						{
							gene[g] = best_gene;
							gene[g].feature.nz[gene[g].nk] = i;
							gene[g].feature.nx[gene[g].nk] = j;
							gene[g].feature.ny[gene[g].nk] = k;
							gene[g].nk++;
							gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
							g++;
						}
						/* refine positive point */
						for (q = 0; q < best_gene.pk; q++)
						{
							gene[g] = best_gene;
							gene[g].feature.pz[q] = i;
							gene[g].feature.px[q] = j;
							gene[g].feature.py[q] = k;
							g++;
						}
						/* add positive point, remove negative point */
						if (best_gene.pk < CCV_BBF_POINT_MAX - 1 && best_gene.nk > 1)
						{
							for (q = 0; q < best_gene.nk; q++)
							{
								gene[g] = best_gene;
								gene[g].feature.pz[gene[g].pk] = i;
								gene[g].feature.px[gene[g].pk] = j;
								gene[g].feature.py[gene[g].pk] = k;
								gene[g].pk++;
								for (p = q; p < best_gene.nk - 1; p++)
								{
									gene[g].feature.nz[p] = gene[g].feature.nz[p + 1];
									gene[g].feature.nx[p] = gene[g].feature.nx[p + 1];
									gene[g].feature.ny[p] = gene[g].feature.ny[p + 1];
								}
								gene[g].feature.nz[gene[g].nk - 1] = -1;
								gene[g].nk--;
								gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
								g++;
							}
						}
						/* refine negative point */
						for (q = 0; q < best_gene.nk; q++)
						{
							gene[g] = best_gene;
							gene[g].feature.nz[q] = i;
							gene[g].feature.nx[q] = j;
							gene[g].feature.ny[q] = k;
							g++;
						}
						/* add negative point, remove positive point */
						if (best_gene.pk > 1 && best_gene.nk < CCV_BBF_POINT_MAX - 1)
						{
							for (q = 0; q < best_gene.pk; q++)
							{
								gene[g] = best_gene;
								gene[g].feature.nz[gene[g].nk] = i;
								gene[g].feature.nx[gene[g].nk] = j;
								gene[g].feature.ny[gene[g].nk] = k;
								gene[g].nk++;
								for (p = q; p < best_gene.pk - 1; p++)
								{
									gene[g].feature.pz[p] = gene[g].feature.pz[p + 1];
									gene[g].feature.px[p] = gene[g].feature.px[p + 1];
									gene[g].feature.py[p] = gene[g].feature.py[p + 1];
								}
								gene[g].feature.pz[gene[g].pk - 1] = -1;
								gene[g].pk--;
								gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
								g++;
							}
						}
					}
		/* pure removal candidates: drop one positive point */
		if (best_gene.pk > 1)
			for (q = 0; q < best_gene.pk; q++)
			{
				gene[g] = best_gene;
				for (i = q; i < best_gene.pk - 1; i++)
				{
					gene[g].feature.pz[i] = gene[g].feature.pz[i + 1];
					gene[g].feature.px[i] = gene[g].feature.px[i + 1];
					gene[g].feature.py[i] = gene[g].feature.py[i + 1];
				}
				gene[g].feature.pz[gene[g].pk - 1] = -1;
				gene[g].pk--;
				gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
				g++;
			}
		/* pure removal candidates: drop one negative point */
		if (best_gene.nk > 1)
			for (q = 0; q < best_gene.nk; q++)
			{
				gene[g] = best_gene;
				for (i = q; i < best_gene.nk - 1; i++)
				{
					gene[g].feature.nz[i] = gene[g].feature.nz[i + 1];
					gene[g].feature.nx[i] = gene[g].feature.nx[i + 1];
					gene[g].feature.ny[i] = gene[g].feature.ny[i + 1];
				}
				gene[g].feature.nz[gene[g].nk - 1] = -1;
				gene[g].nk--;
				gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
				g++;
			}
		/* the unchanged gene competes too, so error never regresses */
		gene[g] = best_gene;
		g++;
		PRINT(CCV_CLI_INFO, "float search round : %d\n", t);
		ccv_bbf_gene_t local_gene = _ccv_bbf_best_gene(gene, g, CCV_BBF_POINT_MIN, posdata, posnum, negdata, negnum, size, pw, nw);
		if (local_gene.error >= best_gene.error - 1e-10)
			break;
		best_gene = local_gene;
	}
	ccfree(gene);
	gsl_rng_free(rng);
	return best_gene.feature;
}
/* Serialize one stage classifier as whitespace-separated integers; floats
 * are written as the decimal value of their bit pattern (via an int/float
 * union) so _ccv_read_bbf_stage_classifier recovers them bit-exactly.
 * Returns 0 on success, -1 if the file cannot be opened.
 * Fix: open in text mode ("w") -- the output is fprintf text and the paired
 * reader opens with "r"; the old "wb" produced line endings inconsistent
 * with the reader's text-mode expectations on platforms that translate
 * newlines. */
static int _ccv_write_bbf_stage_classifier(const char* file, ccv_bbf_stage_classifier_t* classifier)
{
	FILE* w = fopen(file, "w");
	if (w == 0) return -1;
	fprintf(w, "%d\n", classifier->count);
	union { float fl; int i; } fli;
	fli.fl = classifier->threshold;
	fprintf(w, "%d\n", fli.i);
	int i, j;
	for (i = 0; i < classifier->count; i++)
	{
		fprintf(w, "%d\n", classifier->feature[i].size);
		for (j = 0; j < classifier->feature[i].size; j++)
		{
			fprintf(w, "%d %d %d\n", classifier->feature[i].px[j], classifier->feature[i].py[j], classifier->feature[i].pz[j]);
			fprintf(w, "%d %d %d\n", classifier->feature[i].nx[j], classifier->feature[i].ny[j], classifier->feature[i].nz[j]);
		}
		union { float fl; int i; } flia, flib;
		flia.fl = classifier->alpha[i * 2];
		flib.fl = classifier->alpha[i * 2 + 1];
		fprintf(w, "%d %d\n", flia.i, flib.i);
	}
	fclose(w);
	return 0;
}
/* Load the binary negative-sample cache written by
 * _ccv_write_background_data: an int count followed by that many packed
 * 3-level pyramids. Returns 0 on success, -1 on open failure or a
 * truncated file.
 * Fix: the original ignored the fread result for the count, so a bad file
 * drove the allocation loop with an uninitialized *negnum; short sample
 * reads likewise went unnoticed. Both now fail cleanly with *negnum = 0 and
 * no leaked buffers. */
static int _ccv_read_background_data(const char* file, unsigned char** negdata, int* negnum, ccv_size_t size)
{
	FILE* r = fopen(file, "rb");
	if (r == 0) return -1;
	if (fread(negnum, sizeof(int), 1, r) != 1 || *negnum < 0)
	{
		*negnum = 0;
		fclose(r);
		return -1;
	}
	int i;
	int isizs012 = _ccv_width_padding(size.width) * size.height +
				   _ccv_width_padding(size.width >> 1) * (size.height >> 1) +
				   _ccv_width_padding(size.width >> 2) * (size.height >> 2);
	for (i = 0; i < *negnum; i++)
	{
		negdata[i] = (unsigned char*)ccmalloc(isizs012);
		if (fread(negdata[i], 1, isizs012, r) != (size_t)isizs012)
		{
			/* truncated cache: release everything read so far */
			int j;
			for (j = 0; j <= i; j++)
				ccfree(negdata[j]);
			*negnum = 0;
			fclose(r);
			return -1;
		}
	}
	fclose(r);
	return 0;
}
/* Writes the mined negative (background) samples to `file` so training can
 * resume without re-mining them.
 * Layout: [int negnum][negnum x isizs012 raw pyramid bytes].
 * Returns 0 on success, -1 if the file cannot be opened.
 * Fix over the original: the file was opened with mode "w" (text) although it
 * holds raw binary data and is read back with "rb" in
 * _ccv_read_background_data; on Windows, text mode mangles 0x0A bytes.
 * Opened with "wb" now. */
static int _ccv_write_background_data(const char* file, unsigned char** negdata, int negnum, ccv_size_t size)
{
	FILE* w = fopen(file, "wb");
	if (w == 0) return -1;
	fwrite(&negnum, sizeof(int), 1, w);
	int i;
	/* packed byte size of the 3-level (full, half, quarter) padded pyramid per sample */
	int isizs012 = _ccv_width_padding(size.width) * size.height +
		_ccv_width_padding(size.width >> 1) * (size.height >> 1) +
		_ccv_width_padding(size.width >> 2) * (size.height >> 2);
	for (i = 0; i < negnum; i++)
		fwrite(negdata[i], 1, isizs012, w);
	fclose(w);
	return 0;
}
/* Restores checkpointed training progress from `file`:
 * first line holds "<layer i> <weak-classifier k> <bg flag>", followed by one
 * line of posnum doubles and one line of negnum doubles (each double encoded
 * as two ints via a bit-cast union, matching
 * _ccv_save_bbf_cacade_training_state).
 * Returns 0 on success, -1 if the file cannot be opened. */
static int _ccv_resume_bbf_cascade_training_state(const char* file, int* i, int* k, int* bg, double* pw, double* nw, int posnum, int negnum)
{
	FILE* r = fopen(file, "r");
	if (r == 0)
		return -1;
	int stat = 0;
	stat |= fscanf(r, "%d %d %d", i, k, bg);
	union { double db; int i[2]; } bits;
	int j;
	for (j = 0; j < posnum; j++)
	{
		stat |= fscanf(r, "%d %d", bits.i, bits.i + 1);
		pw[j] = bits.db;
	}
	for (j = 0; j < negnum; j++)
	{
		stat |= fscanf(r, "%d %d", bits.i, bits.i + 1);
		nw[j] = bits.db;
	}
	fclose(r);
	return 0;
}
/* Checkpoints training progress to `file` in the format read back by
 * _ccv_resume_bbf_cascade_training_state: one header line "<i> <k> <bg>",
 * then the posnum positive weights and the negnum negative weights, each
 * double written as two ints through a bit-cast union so the exact IEEE
 * bits survive the text round-trip.
 * Returns 0 on success, -1 if the file cannot be opened. */
static int _ccv_save_bbf_cacade_training_state(const char* file, int i, int k, int bg, double* pw, double* nw, int posnum, int negnum)
{
	FILE* w = fopen(file, "w");
	if (w == 0)
		return -1;
	fprintf(w, "%d %d %d\n", i, k, bg);
	union { double db; int i[2]; } bits;
	int j;
	for (j = 0; j < posnum; ++j)
	{
		bits.db = pw[j];
		fprintf(w, "%d %d ", bits.i[0], bits.i[1]);
	}
	fprintf(w, "\n");
	for (j = 0; j < negnum; ++j)
	{
		bits.db = nw[j];
		fprintf(w, "%d %d ", bits.i[0], bits.i[1]);
	}
	fprintf(w, "\n");
	fclose(w);
	return 0;
}
/* Trains a BBF cascade of AdaBoost-style stage classifiers from positive
 * images and mined background negatives, checkpointing all state under `dir`
 * (stat.txt, negs.txt, stage-N.txt) so an interrupted run can resume.
 * The trained stages are persisted to disk rather than returned.
 * posimg/posnum: positive examples; bgfiles/bgnum: background images to mine
 * negatives from; negnum: negatives per round; size: detection window;
 * params: training hyper-parameters (layers, TP/FP criteria, optimizer, ...). */
void ccv_bbf_classifier_cascade_new(ccv_dense_matrix_t** posimg, int posnum, char** bgfiles, int bgnum, int negnum, ccv_size_t size, const char* dir, ccv_bbf_new_param_t params)
{
	int i, j, k;
	/* allocate memory for usage */
	ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t));
	cascade->count = 0;
	cascade->size = size;
	cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(sizeof(ccv_bbf_stage_classifier_t));
	unsigned char** posdata = (unsigned char**)ccmalloc(posnum * sizeof(unsigned char*));
	unsigned char** negdata = (unsigned char**)ccmalloc(negnum * sizeof(unsigned char*));
	double* pw = (double*)ccmalloc(posnum * sizeof(double)); /* boosting weights of positives */
	double* nw = (double*)ccmalloc(negnum * sizeof(double)); /* boosting weights of negatives */
	float* peval = (float*)ccmalloc(posnum * sizeof(float)); /* per-positive classifier scores */
	float* neval = (float*)ccmalloc(negnum * sizeof(float)); /* per-negative classifier scores */
	double inv_balance_k = 1. / params.balance_k;
	/* balance factor k, and weighted with 0.01 */
	params.balance_k *= 0.01;
	inv_balance_k *= 0.01;
	/* row strides of the 3 pyramid levels (full, 1/2, 1/4) inside a packed sample */
	int steps[] = { _ccv_width_padding(cascade->size.width),
		_ccv_width_padding(cascade->size.width >> 1),
		_ccv_width_padding(cascade->size.width >> 2) };
	int isizs0 = steps[0] * cascade->size.height;
	int isizs01 = isizs0 + steps[1] * (cascade->size.height >> 1);
	i = 0;
	k = 0;
	int bg = 0;
	int cacheK = 10; /* current capacity of classifier.feature / .alpha */
	/* state resume code */
	char buf[1024];
	sprintf(buf, "%s/stat.txt", dir);
	_ccv_resume_bbf_cascade_training_state(buf, &i, &k, &bg, pw, nw, posnum, negnum);
	if (i > 0)
	{
		/* i stages were already fully trained: reload them from disk */
		cascade->count = i;
		ccfree(cascade->stage_classifier);
		cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(i * sizeof(ccv_bbf_stage_classifier_t));
		for (j = 0; j < i; j++)
		{
			sprintf(buf, "%s/stage-%d.txt", dir, j);
			_ccv_read_bbf_stage_classifier(buf, &cascade->stage_classifier[j]);
		}
	}
	if (k > 0)
		cacheK = k;
	int rpos, rneg = 0;
	if (bg)
	{
		/* negatives for the current stage were already mined: reload the cache */
		sprintf(buf, "%s/negs.txt", dir);
		_ccv_read_background_data(buf, negdata, &rneg, cascade->size);
	}
	for (; i < params.layer; i++)
	{
		if (!bg)
		{
			rneg = _ccv_prepare_background_data(cascade, bgfiles, bgnum, negdata, negnum);
			/* save state of background data */
			sprintf(buf, "%s/negs.txt", dir);
			_ccv_write_background_data(buf, negdata, rneg, cascade->size);
			bg = 1;
		}
		double totalw;
		/* save state of cascade : level, weight etc. */
		sprintf(buf, "%s/stat.txt", dir);
		_ccv_save_bbf_cacade_training_state(buf, i, k, bg, pw, nw, posnum, negnum);
		ccv_bbf_stage_classifier_t classifier;
		if (k > 0)
		{
			/* resume state of classifier */
			sprintf( buf, "%s/stage-%d.txt", dir, i );
			_ccv_read_bbf_stage_classifier(buf, &classifier);
		} else {
			/* initialize classifier */
			for (j = 0; j < posnum; j++)
				pw[j] = params.balance_k;
			for (j = 0; j < rneg; j++)
				nw[j] = inv_balance_k;
			classifier.count = k;
			classifier.threshold = 0;
			classifier.feature = (ccv_bbf_feature_t*)ccmalloc(cacheK * sizeof(ccv_bbf_feature_t));
			classifier.alpha = (float*)ccmalloc(cacheK * 2 * sizeof(float));
		}
		_ccv_prepare_positive_data(posimg, posdata, cascade->size, posnum);
		/* drop positives already rejected by earlier stages */
		rpos = _ccv_prune_positive_data(cascade, posdata, posnum, cascade->size);
		PRINT(CCV_CLI_INFO, "%d postivie data and %d negative data in training\n", rpos, rneg);
		/* reweight to 1.00 */
		totalw = 0;
		for (j = 0; j < rpos; j++)
			totalw += pw[j];
		for (j = 0; j < rneg; j++)
			totalw += nw[j];
		for (j = 0; j < rpos; j++)
			pw[j] = pw[j] / totalw;
		for (j = 0; j < rneg; j++)
			nw[j] = nw[j] / totalw;
		/* boosting loop: add one weak feature per iteration until the stage
		 * meets the TP/FP criteria */
		for (; ; k++)
		{
			/* get overall true-positive, false-positive rate and threshold */
			double tp = 0, fp = 0, etp = 0, efp = 0;
			_ccv_bbf_eval_data(&classifier, posdata, rpos, negdata, rneg, cascade->size, peval, neval);
			_ccv_sort_32f(peval, rpos, 0);
			/* choose the threshold so a pos_crit fraction of positives passes */
			classifier.threshold = peval[(int)((1. - params.pos_crit) * rpos)] - 1e-6;
			for (j = 0; j < rpos; j++)
			{
				if (peval[j] >= 0)
					++tp;
				if (peval[j] >= classifier.threshold)
					++etp;
			}
			tp /= rpos; etp /= rpos;
			for (j = 0; j < rneg; j++)
			{
				if (neval[j] >= 0)
					++fp;
				if (neval[j] >= classifier.threshold)
					++efp;
			}
			fp /= rneg; efp /= rneg;
			PRINT(CCV_CLI_INFO, "stage classifier real TP rate : %f, FP rate : %f\n", tp, fp);
			PRINT(CCV_CLI_INFO, "stage classifier TP rate : %f, FP rate : %f at threshold : %f\n", etp, efp, classifier.threshold);
			if (k > 0)
			{
				/* save classifier state */
				sprintf(buf, "%s/stage-%d.txt", dir, i);
				_ccv_write_bbf_stage_classifier(buf, &classifier);
				sprintf(buf, "%s/stat.txt", dir);
				_ccv_save_bbf_cacade_training_state(buf, i, k, bg, pw, nw, posnum, negnum);
			}
			if (etp > params.pos_crit && efp < params.neg_crit)
				break;
			/* TODO: more post-process is needed in here */
			/* select the best feature in current distribution through genetic algorithm optimization */
			ccv_bbf_feature_t best;
			if (params.optimizer == CCV_BBF_GENETIC_OPT)
			{
				best = _ccv_bbf_genetic_optimize(posdata, rpos, negdata, rneg, params.feature_number, cascade->size, pw, nw);
			} else if (params.optimizer == CCV_BBF_FLOAT_OPT) {
				best = _ccv_bbf_convex_optimize(posdata, rpos, negdata, rneg, 0, cascade->size, pw, nw);
			} else {
				/* presumably the combined mode: genetic search refined by
				 * convex optimization -- confirm against the enum definition */
				best = _ccv_bbf_genetic_optimize(posdata, rpos, negdata, rneg, params.feature_number, cascade->size, pw, nw);
				best = _ccv_bbf_convex_optimize(posdata, rpos, negdata, rneg, &best, cascade->size, pw, nw);
			}
			double err = _ccv_bbf_error_rate(&best, posdata, rpos, negdata, rneg, cascade->size, pw, nw);
			double rw = (1 - err) / err; /* AdaBoost re-weighting ratio */
			totalw = 0;
			/* reweight */
			for (j = 0; j < rpos; j++)
			{
				unsigned char* u8[] = { posdata[j], posdata[j] + isizs0, posdata[j] + isizs01 };
				if (!_ccv_run_bbf_feature(&best, steps, u8))
					pw[j] *= rw; /* boost misclassified positives */
				pw[j] *= params.balance_k;
				totalw += pw[j];
			}
			for (j = 0; j < rneg; j++)
			{
				unsigned char* u8[] = { negdata[j], negdata[j] + isizs0, negdata[j] + isizs01 };
				if (_ccv_run_bbf_feature(&best, steps, u8))
					nw[j] *= rw; /* boost misclassified negatives */
				nw[j] *= inv_balance_k;
				totalw += nw[j];
			}
			for (j = 0; j < rpos; j++)
				pw[j] = pw[j] / totalw;
			for (j = 0; j < rneg; j++)
				nw[j] = nw[j] / totalw;
			double c = log(rw);
			PRINT(CCV_CLI_INFO, "coefficient of feature %d: %f\n", k + 1, c);
			classifier.count = k + 1;
			/* resizing classifier */
			if (k >= cacheK)
			{
				/* grow feature/alpha storage geometrically */
				ccv_bbf_feature_t* feature = (ccv_bbf_feature_t*)ccmalloc(cacheK * 2 * sizeof(ccv_bbf_feature_t));
				memcpy(feature, classifier.feature, cacheK * sizeof(ccv_bbf_feature_t));
				ccfree(classifier.feature);
				float* alpha = (float*)ccmalloc(cacheK * 4 * sizeof(float));
				memcpy(alpha, classifier.alpha, cacheK * 2 * sizeof(float));
				ccfree(classifier.alpha);
				classifier.feature = feature;
				classifier.alpha = alpha;
				cacheK *= 2;
			}
			/* setup new feature */
			classifier.feature[k] = best;
			classifier.alpha[k * 2] = -c;
			classifier.alpha[k * 2 + 1] = c;
		}
		/* append the finished stage to the cascade */
		cascade->count = i + 1;
		ccv_bbf_stage_classifier_t* stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t));
		memcpy(stage_classifier, cascade->stage_classifier, i * sizeof(ccv_bbf_stage_classifier_t));
		ccfree(cascade->stage_classifier);
		stage_classifier[i] = classifier;
		cascade->stage_classifier = stage_classifier;
		/* reset per-stage progress so the next layer starts fresh */
		k = 0;
		bg = 0;
		for (j = 0; j < rpos; j++)
			ccfree(posdata[j]);
		for (j = 0; j < rneg; j++)
			ccfree(negdata[j]);
	}
	ccfree(neval);
	ccfree(peval);
	ccfree(nw);
	ccfree(pw);
	ccfree(negdata);
	ccfree(posdata);
	/* NOTE(review): only the cascade struct itself is freed here; the
	 * stage_classifier array and each stage's feature/alpha buffers are not.
	 * Presumably acceptable because every stage is persisted under `dir`,
	 * but verify this leak is intentional. */
	ccfree(cascade);
}
#else
/* Stub used when ccv is built without libgsl: BBF training is unavailable,
 * so just report the missing dependency on stderr. */
void ccv_bbf_classifier_cascade_new(ccv_dense_matrix_t** posimg, int posnum, char** bgfiles, int bgnum, int negnum, ccv_size_t size, const char* dir, ccv_bbf_new_param_t params)
{
	fputs(" ccv_bbf_classifier_cascade_new requires libgsl support, please compile ccv with libgsl.\n", stderr);
}
#endif
/* Grouping predicate for ccv_array_group: non-zero when r2's rectangle lies
 * within a tolerance (25% of r1's width) of r1's position and their widths
 * are within a factor of ~1.5 of each other. */
static int _ccv_is_equal(const void* _r1, const void* _r2, void* data)
{
	const ccv_comp_t* r1 = (const ccv_comp_t*)_r1;
	const ccv_comp_t* r2 = (const ccv_comp_t*)_r2;
	int distance = (int)(r1->rect.width * 0.25 + 0.5);
	if (r2->rect.x > r1->rect.x + distance || r2->rect.x < r1->rect.x - distance)
		return 0;
	if (r2->rect.y > r1->rect.y + distance || r2->rect.y < r1->rect.y - distance)
		return 0;
	if (r2->rect.width > (int)(r1->rect.width * 1.5 + 0.5))
		return 0;
	return (int)(r2->rect.width * 1.5 + 0.5) >= r1->rect.width;
}
/* Same neighborhood test as _ccv_is_equal, but additionally requires both
 * components to carry the same classification id. */
static int _ccv_is_equal_same_class(const void* _r1, const void* _r2, void* data)
{
	const ccv_comp_t* r1 = (const ccv_comp_t*)_r1;
	const ccv_comp_t* r2 = (const ccv_comp_t*)_r2;
	if (r2->classification.id != r1->classification.id)
		return 0;
	int distance = (int)(r1->rect.width * 0.25 + 0.5);
	if (r2->rect.x > r1->rect.x + distance || r2->rect.x < r1->rect.x - distance)
		return 0;
	if (r2->rect.y > r1->rect.y + distance || r2->rect.y < r1->rect.y - distance)
		return 0;
	if (r2->rect.width > (int)(r1->rect.width * 1.5 + 0.5))
		return 0;
	return (int)(r2->rect.width * 1.5 + 0.5) >= r1->rect.width;
}
/* Multi-scale BBF object detection over image `a` using `count` cascades.
 * Builds an image pyramid (plus half-pixel-shifted variants when
 * params.accurate is set), slides every cascade over every scale, then merges
 * raw hits into averaged, neighbor-filtered rectangles (merging logic adapted
 * from OpenCV's haar detector, as noted below).
 * Returns a ccv_array_t of ccv_comp_t; the caller owns and must free it. */
ccv_array_t* ccv_bbf_detect_objects(ccv_dense_matrix_t* a, ccv_bbf_classifier_cascade_t** _cascade, int count, ccv_bbf_param_t params)
{
	int hr = a->rows / params.size.height;
	int wr = a->cols / params.size.width;
	double scale = pow(2., 1. / (params.interval + 1.));
	int next = params.interval + 1;
	int scale_upto = (int)(log((double)ccv_min(hr, wr)) / log(scale));
	/* pyr keeps 4 slots per scale: the base image plus up to 3 shifted
	 * variants (dx/dy offsets) used only when params.accurate is set */
	ccv_dense_matrix_t** pyr = (ccv_dense_matrix_t**)alloca((scale_upto + next * 2) * 4 * sizeof(ccv_dense_matrix_t*));
	memset(pyr, 0, (scale_upto + next * 2) * 4 * sizeof(ccv_dense_matrix_t*));
	/* normalize the input so cascade window and requested window agree */
	if (params.size.height != _cascade[0]->size.height || params.size.width != _cascade[0]->size.width)
		ccv_resample(a, &pyr[0], 0, a->rows * _cascade[0]->size.height / params.size.height, a->cols * _cascade[0]->size.width / params.size.width, CCV_INTER_AREA);
	else
		pyr[0] = a;
	int i, j, k, t, x, y, q;
	/* first octave by resampling, every later level by halving the one an octave up */
	for (i = 1; i < ccv_min(params.interval + 1, scale_upto + next * 2); i++)
		ccv_resample(pyr[0], &pyr[i * 4], 0, (int)(pyr[0]->rows / pow(scale, i)), (int)(pyr[0]->cols / pow(scale, i)), CCV_INTER_AREA);
	for (i = next; i < scale_upto + next * 2; i++)
		ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4], 0, 0, 0);
	if (params.accurate)
		for (i = next * 2; i < scale_upto + next * 2; i++)
		{
			/* shifted downsamples: (1,0), (0,1) and (1,1) source offsets */
			ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 1], 0, 1, 0);
			ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 2], 0, 0, 1);
			ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 3], 0, 1, 1);
		}
	ccv_array_t* idx_seq;
	ccv_array_t* seq = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
	ccv_array_t* seq2 = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
	ccv_array_t* result_seq = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
	/* detect in multi scale */
	for (t = 0; t < count; t++)
	{
		ccv_bbf_classifier_cascade_t* cascade = _cascade[t];
		float scale_x = (float) params.size.width / (float) cascade->size.width;
		float scale_y = (float) params.size.height / (float) cascade->size.height;
		ccv_array_clear(seq);
		for (i = 0; i < scale_upto; i++)
		{
			int dx[] = {0, 1, 0, 1};
			int dy[] = {0, 0, 1, 1};
			/* the quarter-resolution level bounds the scan area */
			int i_rows = pyr[i * 4 + next * 8]->rows - (cascade->size.height >> 2);
			int steps[] = { pyr[i * 4]->step, pyr[i * 4 + next * 4]->step, pyr[i * 4 + next * 8]->step };
			int i_cols = pyr[i * 4 + next * 8]->cols - (cascade->size.width >> 2);
			/* bytes to skip at the end of each scanned row, per pyramid level */
			int paddings[] = { pyr[i * 4]->step * 4 - i_cols * 4,
				pyr[i * 4 + next * 4]->step * 2 - i_cols * 2,
				pyr[i * 4 + next * 8]->step - i_cols };
			for (q = 0; q < (params.accurate ? 4 : 1); q++)
			{
				/* sliding pointers into the full / half / quarter images,
				 * offset by the sub-pixel shift of variant q */
				unsigned char* u8[] = { pyr[i * 4]->data.u8 + dx[q] * 2 + dy[q] * pyr[i * 4]->step * 2, pyr[i * 4 + next * 4]->data.u8 + dx[q] + dy[q] * pyr[i * 4 + next * 4]->step, pyr[i * 4 + next * 8 + q]->data.u8 };
				for (y = 0; y < i_rows; y++)
				{
					for (x = 0; x < i_cols; x++)
					{
						float sum;
						int flag = 1;
						ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier;
						/* run the stages in order; reject at the first failing stage */
						for (j = 0; j < cascade->count; ++j, ++classifier)
						{
							sum = 0;
							float* alpha = classifier->alpha;
							ccv_bbf_feature_t* feature = classifier->feature;
							for (k = 0; k < classifier->count; ++k, alpha += 2, ++feature)
								sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)];
							if (sum < classifier->threshold)
							{
								flag = 0;
								break;
							}
						}
						if (flag)
						{
							ccv_comp_t comp;
							/* map window position back into input-image coordinates */
							comp.rect = ccv_rect((int)((x * 4 + dx[q] * 2) * scale_x + 0.5), (int)((y * 4 + dy[q] * 2) * scale_y + 0.5), (int)(cascade->size.width * scale_x + 0.5), (int)(cascade->size.height * scale_y + 0.5));
							comp.neighbors = 1;
							comp.classification.id = t;
							comp.classification.confidence = sum; /* score of the last stage */
							ccv_array_push(seq, &comp);
						}
						u8[0] += 4;
						u8[1] += 2;
						u8[2] += 1;
					}
					u8[0] += paddings[0];
					u8[1] += paddings[1];
					u8[2] += paddings[2];
				}
			}
			scale_x *= scale;
			scale_y *= scale;
		}
		/* the following code from OpenCV's haar feature implementation */
		if(params.min_neighbors == 0)
		{
			for (i = 0; i < seq->rnum; i++)
			{
				ccv_comp_t* comp = (ccv_comp_t*)ccv_array_get(seq, i);
				ccv_array_push(result_seq, comp);
			}
		} else {
			idx_seq = 0;
			ccv_array_clear(seq2);
			// group retrieved rectangles in order to filter out noise
			int ncomp = ccv_array_group(seq, &idx_seq, _ccv_is_equal_same_class, 0);
			ccv_comp_t* comps = (ccv_comp_t*)ccmalloc((ncomp + 1) * sizeof(ccv_comp_t));
			memset(comps, 0, (ncomp + 1) * sizeof(ccv_comp_t));
			// count number of neighbors
			for(i = 0; i < seq->rnum; i++)
			{
				ccv_comp_t r1 = *(ccv_comp_t*)ccv_array_get(seq, i);
				int idx = *(int*)ccv_array_get(idx_seq, i);
				if (comps[idx].neighbors == 0)
					comps[idx].classification.confidence = r1.classification.confidence;
				++comps[idx].neighbors;
				/* accumulate coordinates; averaged below */
				comps[idx].rect.x += r1.rect.x;
				comps[idx].rect.y += r1.rect.y;
				comps[idx].rect.width += r1.rect.width;
				comps[idx].rect.height += r1.rect.height;
				comps[idx].classification.id = r1.classification.id;
				comps[idx].classification.confidence = ccv_max(comps[idx].classification.confidence, r1.classification.confidence);
			}
			// calculate average bounding box
			for(i = 0; i < ncomp; i++)
			{
				int n = comps[i].neighbors;
				if(n >= params.min_neighbors)
				{
					ccv_comp_t comp;
					/* (sum * 2 + n) / (2 * n) is the rounded average */
					comp.rect.x = (comps[i].rect.x * 2 + n) / (2 * n);
					comp.rect.y = (comps[i].rect.y * 2 + n) / (2 * n);
					comp.rect.width = (comps[i].rect.width * 2 + n) / (2 * n);
					comp.rect.height = (comps[i].rect.height * 2 + n) / (2 * n);
					comp.neighbors = comps[i].neighbors;
					comp.classification.id = comps[i].classification.id;
					comp.classification.confidence = comps[i].classification.confidence;
					ccv_array_push(seq2, &comp);
				}
			}
			// filter out small face rectangles inside large face rectangles
			for(i = 0; i < seq2->rnum; i++)
			{
				ccv_comp_t r1 = *(ccv_comp_t*)ccv_array_get(seq2, i);
				int flag = 1;
				for(j = 0; j < seq2->rnum; j++)
				{
					ccv_comp_t r2 = *(ccv_comp_t*)ccv_array_get(seq2, j);
					int distance = (int)(r2.rect.width * 0.25 + 0.5);
					if(i != j &&
						r1.classification.id == r2.classification.id &&
						r1.rect.x >= r2.rect.x - distance &&
						r1.rect.y >= r2.rect.y - distance &&
						r1.rect.x + r1.rect.width <= r2.rect.x + r2.rect.width + distance &&
						r1.rect.y + r1.rect.height <= r2.rect.y + r2.rect.height + distance &&
						(r2.neighbors > ccv_max(3, r1.neighbors) || r1.neighbors < 3))
					{
						flag = 0;
						break;
					}
				}
				if(flag)
					ccv_array_push(result_seq, &r1);
			}
			ccv_array_free(idx_seq);
			ccfree(comps);
		}
	}
	ccv_array_free(seq);
	ccv_array_free(seq2);
	ccv_array_t* result_seq2;
	/* the following code from OpenCV's haar feature implementation */
	if (params.flags & CCV_BBF_NO_NESTED)
	{
		result_seq2 = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
		idx_seq = 0;
		// group retrieved rectangles in order to filter out noise
		int ncomp = ccv_array_group(result_seq, &idx_seq, _ccv_is_equal, 0);
		ccv_comp_t* comps = (ccv_comp_t*)ccmalloc((ncomp + 1) * sizeof(ccv_comp_t));
		memset(comps, 0, (ncomp + 1) * sizeof(ccv_comp_t));
		// count number of neighbors
		for(i = 0; i < result_seq->rnum; i++)
		{
			ccv_comp_t r1 = *(ccv_comp_t*)ccv_array_get(result_seq, i);
			int idx = *(int*)ccv_array_get(idx_seq, i);
			/* keep only the most confident representative of each group */
			if (comps[idx].neighbors == 0 || comps[idx].classification.confidence < r1.classification.confidence)
			{
				comps[idx].classification.confidence = r1.classification.confidence;
				comps[idx].neighbors = 1;
				comps[idx].rect = r1.rect;
				comps[idx].classification.id = r1.classification.id;
			}
		}
		// calculate average bounding box
		for(i = 0; i < ncomp; i++)
			if(comps[i].neighbors)
				ccv_array_push(result_seq2, &comps[i]);
		ccv_array_free(result_seq);
		ccfree(comps);
	} else {
		result_seq2 = result_seq;
	}
	/* release the pyramid; slot 0 only if we allocated it (i.e. resampled) */
	for (i = 1; i < scale_upto + next * 2; i++)
		ccv_matrix_free(pyr[i * 4]);
	if (params.accurate)
		for (i = next * 2; i < scale_upto + next * 2; i++)
		{
			ccv_matrix_free(pyr[i * 4 + 1]);
			ccv_matrix_free(pyr[i * 4 + 2]);
			ccv_matrix_free(pyr[i * 4 + 3]);
		}
	if (params.size.height != _cascade[0]->size.height || params.size.width != _cascade[0]->size.width)
		ccv_matrix_free(pyr[0]);
	return result_seq2;
}
/* Loads a trained BBF cascade from `directory` (cascade.txt plus one
 * stage-N.txt per stage). Returns a ccmalloc'ed cascade the caller frees
 * with ccv_bbf_classifier_cascade_free, or 0 when cascade.txt is missing.
 * If a stage file is missing/corrupt, the cascade is truncated to the
 * stages read so far.
 * Fix over the original: the paths were built with unbounded sprintf into a
 * 1024-byte buffer, overflowing on a long `directory`; snprintf bounds it. */
ccv_bbf_classifier_cascade_t* ccv_bbf_read_classifier_cascade(const char* directory)
{
	char buf[1024];
	snprintf(buf, sizeof(buf), "%s/cascade.txt", directory);
	int s, i;
	FILE* r = fopen(buf, "r");
	if (r == 0)
		return 0;
	ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t));
	s = fscanf(r, "%d %d %d", &cascade->count, &cascade->size.width, &cascade->size.height);
	assert(s > 0);
	cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t));
	for (i = 0; i < cascade->count; i++)
	{
		snprintf(buf, sizeof(buf), "%s/stage-%d.txt", directory, i);
		if (_ccv_read_bbf_stage_classifier(buf, &cascade->stage_classifier[i]) < 0)
		{
			/* stop at the first unreadable stage, keep what was loaded */
			cascade->count = i;
			break;
		}
	}
	fclose(r);
	return cascade;
}
ccv_bbf_classifier_cascade_t* ccv_bbf_classifier_cascade_read_binary(char* s)
{
int i;
ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t));
memcpy(&cascade->count, s, sizeof(cascade->count)); s += sizeof(cascade->count);
memcpy(&cascade->size.width, s, sizeof(cascade->size.width)); s += sizeof(cascade->size.width);
memcpy(&cascade->size.height, s, sizeof(cascade->size.height)); s += sizeof(cascade->size.height);
ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t));
for (i = 0; i < cascade->count; i++, classifier++)
{
memcpy(&classifier->count, s, sizeof(classifier->count)); s += sizeof(classifier->count);
memcpy(&classifier->threshold, s, sizeof(classifier->threshold)); s += sizeof(classifier->threshold);
classifier->feature = (ccv_bbf_feature_t*)ccmalloc(classifier->count * sizeof(ccv_bbf_feature_t));
classifier->alpha = (float*)ccmalloc(classifier->count * 2 * sizeof(float));
memcpy(classifier->feature, s, classifier->count * sizeof(ccv_bbf_feature_t)); s += classifier->count * sizeof(ccv_bbf_feature_t);
memcpy(classifier->alpha, s, classifier->count * 2 * sizeof(float)); s += classifier->count * 2 * sizeof(float);
}
return cascade;
}
/* Serializes `cascade` into buffer `s` of capacity `slen`.
 * Always returns the number of bytes required; the data is only written when
 * slen is large enough (call once with slen = 0 to size the buffer). The
 * layout matches ccv_bbf_classifier_cascade_read_binary. */
int ccv_bbf_classifier_cascade_write_binary(ccv_bbf_classifier_cascade_t* cascade, char* s, int slen)
{
	int i;
	/* first pass: measure the required size */
	int len = sizeof(cascade->count) + sizeof(cascade->size.width) + sizeof(cascade->size.height);
	ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier;
	for (i = 0; i < cascade->count; i++, classifier++)
		len += sizeof(classifier->count) + sizeof(classifier->threshold) + classifier->count * sizeof(ccv_bbf_feature_t) + classifier->count * 2 * sizeof(float);
	if (slen < len)
		return len;
	/* second pass: emit */
	memcpy(s, &cascade->count, sizeof(cascade->count));
	s += sizeof(cascade->count);
	memcpy(s, &cascade->size.width, sizeof(cascade->size.width));
	s += sizeof(cascade->size.width);
	memcpy(s, &cascade->size.height, sizeof(cascade->size.height));
	s += sizeof(cascade->size.height);
	classifier = cascade->stage_classifier;
	for (i = 0; i < cascade->count; i++, classifier++)
	{
		memcpy(s, &classifier->count, sizeof(classifier->count));
		s += sizeof(classifier->count);
		memcpy(s, &classifier->threshold, sizeof(classifier->threshold));
		s += sizeof(classifier->threshold);
		memcpy(s, classifier->feature, classifier->count * sizeof(ccv_bbf_feature_t));
		s += classifier->count * sizeof(ccv_bbf_feature_t);
		memcpy(s, classifier->alpha, classifier->count * 2 * sizeof(float));
		s += classifier->count * 2 * sizeof(float);
	}
	return len;
}
/* Releases a cascade: every stage's feature and alpha arrays, the stage
 * array itself, then the cascade struct. */
void ccv_bbf_classifier_cascade_free(ccv_bbf_classifier_cascade_t* cascade)
{
	int i;
	for (i = 0; i < cascade->count; i++)
	{
		ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier + i;
		ccfree(classifier->feature);
		ccfree(classifier->alpha);
	}
	ccfree(cascade->stage_classifier);
	ccfree(cascade);
}
|
threadpool.h | /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
/* Modifications Copyright (c) Microsoft. */
#pragma once
#include <string>
#include <vector>
#include <functional>
#include <memory>
#include "core/common/common.h"
#include "core/platform/env.h"
#include "core/common/optional.h"
#include <functional>
#include <memory>
// This file use PIMPL to avoid having eigen headers here
namespace Eigen {
class Allocator;
class ThreadPoolInterface;
struct ThreadPoolDevice;
} // namespace Eigen
namespace onnxruntime {
// Rough cost estimate for one unit of work passed to ParallelFor; used by the
// adaptive scheduler to choose shard sizes (mirrors Eigen's TensorOpCost).
struct TensorOpCost {
  // Bytes read per unit of work.
  double bytes_loaded;
  // Bytes written per unit of work.
  double bytes_stored;
  // Estimated CPU cycles (or nanoseconds if not CPU-bound) per unit of work.
  double compute_cycles;
};
template <typename Environment>
class ThreadPoolTempl;
namespace concurrency {
class ThreadPool {
public:
// Scheduling strategies for ParallelFor. The strategy governs how the given
// units of work are distributed among the available threads in the
// threadpool.
enum class SchedulingStrategy {
// The Adaptive scheduling strategy adaptively chooses the shard sizes based
// on the cost of each unit of work, and the cost model of the underlying
// threadpool device.
//
// The 'cost_per_unit' is an estimate of the number of CPU cycles (or
// nanoseconds if not CPU-bound) to complete a unit of work. Overestimating
// creates too many shards and CPU time will be dominated by per-shard
// overhead, such as Context creation. Underestimating may not fully make
// use of the specified parallelism, and may also cause inefficiencies due
// to load balancing issues and stragglers.
kAdaptive,
// The Fixed Block Size scheduling strategy shards the given units of work
// into shards of fixed size. In case the total number of units is not
// evenly divisible by 'block_size', at most one of the shards may be of
// smaller size. The exact number of shards may be found by a call to
// NumShardsUsedByFixedBlockSizeScheduling.
//
// Each shard may be executed on a different thread in parallel, depending
// on the number of threads available in the pool. Note that when there
// aren't enough threads in the pool to achieve full parallelism, function
// calls will be automatically queued.
kFixedBlockSize
};
// Contains additional parameters for either the Adaptive or the Fixed Block
// Size scheduling strategy.
class SchedulingParams {
public:
explicit SchedulingParams(SchedulingStrategy strategy, optional<int64_t> cost_per_unit,
optional<std::ptrdiff_t> block_size)
: strategy_(strategy), cost_per_unit_(cost_per_unit), block_size_(block_size) {
}
SchedulingStrategy strategy() const {
return strategy_;
}
optional<int64_t> cost_per_unit() const {
return cost_per_unit_;
}
optional<std::ptrdiff_t> block_size() const {
return block_size_;
}
private:
// The underlying Scheduling Strategy for which this instance contains
// additional parameters.
SchedulingStrategy strategy_;
// The estimated cost per unit of work in number of CPU cycles (or
// nanoseconds if not CPU-bound). Only applicable for Adaptive scheduling
// strategy.
optional<int64_t> cost_per_unit_;
// The block size of each shard. Only applicable for Fixed Block Size
// scheduling strategy.
optional<std::ptrdiff_t> block_size_;
};
#ifdef _WIN32
using NAME_CHAR_TYPE = wchar_t;
#else
using NAME_CHAR_TYPE = char;
#endif
// Constructs a pool that contains "num_threads" threads with specified
// "name". env->StartThread() is used to create individual threads with the
// given ThreadOptions. If "low_latency_hint" is true the thread pool
// implementation may use it as a hint that lower latency is preferred at the
// cost of higher CPU usage, e.g. by letting one or more idle threads spin
// wait. Conversely, if the threadpool is used to schedule high-latency
// operations like I/O the hint should be set to false.
//
// REQUIRES: num_threads > 0
// The allocator parameter is only used for creating a Eigen::ThreadPoolDevice to be used with Eigen Tensor classes.
ThreadPool(Env* env, const ThreadOptions& thread_options, const NAME_CHAR_TYPE* name, int num_threads,
bool low_latency_hint, Eigen::Allocator* allocator = nullptr);
// Constructs a pool that wraps around the thread::ThreadPoolInterface
// instance provided by the caller. Caller retains ownership of
// `user_threadpool` and must ensure its lifetime is longer than the
// ThreadPool instance.
ThreadPool(Eigen::ThreadPoolInterface* user_threadpool, Eigen::Allocator* allocator);
// Waits until all scheduled work has finished and then destroy the
// set of threads.
~ThreadPool();
// Schedules fn() for execution in the pool of threads.
void Schedule(std::function<void()> fn);
// Returns the number of shards used by ParallelForFixedBlockSizeScheduling
// with these parameters.
int NumShardsUsedByFixedBlockSizeScheduling(std::ptrdiff_t total, std::ptrdiff_t block_size);
// ParallelFor shards the "total" units of work assuming each unit of work
// having roughly "cost_per_unit" cost, in cycles. Each unit of work is
// indexed 0, 1, ..., total - 1. Each shard contains 1 or more units of work
// and the total cost of each shard is roughly the same.
//
// "cost_per_unit" is an estimate of the number of CPU cycles (or nanoseconds
// if not CPU-bound) to complete a unit of work. Overestimating creates too
// many shards and CPU time will be dominated by per-shard overhead, such as
// Context creation. Underestimating may not fully make use of the specified
// parallelism, and may also cause inefficiencies due to load balancing
// issues and stragglers.
void ParallelFor(std::ptrdiff_t total, double cost_per_unit,
const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn);
static void TryParallelFor(concurrency::ThreadPool* tp, std::ptrdiff_t total, double cost_per_unit,
const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn) {
TryParallelFor(tp, total, TensorOpCost{0, 0, static_cast<double>(cost_per_unit)}, fn);
}
void ParallelFor(std::ptrdiff_t total, const TensorOpCost& cost_per_unit,
const std::function<void(std::ptrdiff_t first, std::ptrdiff_t)>& fn);
static void TryParallelFor(concurrency::ThreadPool* tp, std::ptrdiff_t total, const TensorOpCost& cost_per_unit,
const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn) {
#ifdef _OPENMP
ORT_UNUSED_PARAMETER(cost_per_unit);
std::ptrdiff_t num_threads = concurrency::ThreadPool::NumThreads(tp);
if (total < num_threads) {
num_threads = total;
}
#pragma omp parallel for
for (std::ptrdiff_t i = 0; i < num_threads; i++) {
std::ptrdiff_t start, work_remaining;
PartitionWork(i, num_threads, total, &start, &work_remaining);
std::ptrdiff_t end = start + work_remaining;
fn(start, end);
}
#else
if (tp == nullptr) {
fn(0, total);
return;
}
tp->ParallelFor(total, cost_per_unit, fn);
#endif
}
// Similar to ParallelFor above, but takes the specified scheduling strategy
// into account.
void
ParallelFor(std::ptrdiff_t total, const SchedulingParams& scheduling_params,
const std::function<void(std::ptrdiff_t, std::ptrdiff_t)>& fn);
static void TryParallelFor(concurrency::ThreadPool* tp, std::ptrdiff_t total, const SchedulingParams& scheduling_params,
const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn) {
#ifdef _OPENMP
ORT_UNUSED_PARAMETER(scheduling_params);
std::ptrdiff_t num_threads = concurrency::ThreadPool::NumThreads(tp);
if (total < num_threads) {
num_threads = total;
}
#pragma omp parallel for
for (std::ptrdiff_t i = 0; i < num_threads; i++) {
std::ptrdiff_t start, work_remaining;
PartitionWork(i, num_threads, total, &start, &work_remaining);
std::ptrdiff_t end = start + work_remaining;
fn(start, end);
}
#else
if (tp == nullptr) {
fn(0, total);
return;
}
tp->ParallelFor(total, scheduling_params, fn);
#endif
} // namespace concurrency
// Prefer using this API to get the number of threads unless you know what you're doing.
// This API takes into account if openmp is enabled/disabled and if the thread pool ptr is nullptr.
static int NumThreads(const concurrency::ThreadPool* tp);
// Returns the number of threads in the pool. Preferably use the static version of this API instead.
int NumThreads() const;
// Returns current thread id between 0 and NumThreads() - 1, if called from a
// thread in the pool. Returns -1 otherwise.
int CurrentThreadId() const;
// If ThreadPool implementation is compatible with Eigen::ThreadPoolInterface,
// returns a non-null pointer. The caller does not own the object the returned
// pointer points to, and should not attempt to delete.
Eigen::ThreadPoolInterface* AsEigenThreadPool() const;
// Directly schedule the 'total' tasks to the underlying threadpool, without
// cutting them by halves
void SimpleParallelFor(std::ptrdiff_t total, std::function<void(std::ptrdiff_t)> fn);
/**
* Tries to call the given function in parallel, with calls split into (num_batches) batches.
*\param num_batches If it is zero, it will be replaced to the value of NumThreads().
*\param fn A std::function or STL style functor with signature of "void f(int32_t);"
* Pitfall: Caller should cap `num_batches` to a reasonable value based on the cost of `fn` and the value of `total`.
*For example, if fn is as simple as: int sum=0; fn = [&](int i){sum +=i;} and `total` is 100, then num_batches should
*be just 1.
*
* ```
**/
template <typename F>
inline static void TryBatchParallelFor(ThreadPool* tp, std::ptrdiff_t total, F&& fn, std::ptrdiff_t num_batches) {
#ifdef _OPENMP
ORT_UNUSED_PARAMETER(tp);
ORT_UNUSED_PARAMETER(num_batches);
#pragma omp parallel for
for (std::ptrdiff_t i = 0; i < total; ++i) {
fn(i);
}
#else
if (tp == nullptr) {
for (std::ptrdiff_t i = 0; i < total; ++i) {
// In many cases, fn can be inlined here.
fn(i);
}
return;
}
if (total <= 0)
return;
if (total == 1) {
fn(0);
return;
}
if (num_batches <= 0) {
num_batches = std::min<ptrdiff_t>(total, tp->NumThreads());
}
if (num_batches <= 1) {
for (int i = 0; i < total; i++) {
fn(i);
}
return;
}
tp->SimpleParallelFor(num_batches, [&](std::ptrdiff_t batch_index) {
std::ptrdiff_t start, work_remaining;
PartitionWork(batch_index, num_batches, total, &start, &work_remaining);
std::ptrdiff_t end = start + work_remaining;
for (std::ptrdiff_t i = start; i < end; i++) {
fn(i);
}
});
#endif
}
#ifndef _OPENMP
//Deprecated. Please avoid using Eigen Tensor because it will blow up binary size quickly.
Eigen::ThreadPoolDevice& Device() {
// Returns the Eigen device wrapper backed by threadpool_device_, which this
// ThreadPool owns (unique_ptr member) — callers must not delete it.
return *threadpool_device_;
}
#endif
ORT_DISALLOW_COPY_AND_ASSIGNMENT(ThreadPool);
private:
// Divides the work represented by the range [0, total) into k shards.
// Calls fn(i*block_size, (i+1)*block_size) from the ith shard (0 <= i < k).
// Each shard may be executed on a different thread in parallel, depending on
// the number of threads available in the pool.
// When (i+1)*block_size > total, fn(i*block_size, total) is called instead.
// Here, k = NumShardsUsedByFixedBlockSizeScheduling(total, block_size).
// Requires 0 < block_size <= total.
void ParallelForFixedBlockSizeScheduling(std::ptrdiff_t total, std::ptrdiff_t block_size,
const std::function<void(std::ptrdiff_t, std::ptrdiff_t)>& fn);
ThreadOptions thread_options_;
// underlying_threadpool_ is the user_threadpool if user_threadpool is
// provided in the constructor. Otherwise it is the eigen_threadpool_.
Eigen::ThreadPoolInterface* underlying_threadpool_;
// eigen_threadpool_ is instantiated and owned by thread::ThreadPool if
// user_threadpool is not in the constructor.
std::unique_ptr<ThreadPoolTempl<Env> > eigen_threadpool_;
#ifndef _OPENMP
std::unique_ptr<Eigen::ThreadPoolDevice> threadpool_device_;
#endif
// Copied from MlasPartitionWork
static void PartitionWork(std::ptrdiff_t ThreadId, std::ptrdiff_t ThreadCount, std::ptrdiff_t TotalWork,
                          std::ptrdiff_t* WorkIndex, std::ptrdiff_t* WorkRemaining) {
  // Split TotalWork as evenly as possible across ThreadCount shards: the
  // first (TotalWork % ThreadCount) shards each take one extra item.
  const std::ptrdiff_t base = TotalWork / ThreadCount;
  const std::ptrdiff_t extra = TotalWork % ThreadCount;
  const bool has_extra = ThreadId < extra;
  *WorkRemaining = has_extra ? base + 1 : base;
  // Shards with an extra item start at (base+1)*ThreadId; the rest are
  // offset by the `extra` items already handed out.
  *WorkIndex = base * ThreadId + (has_extra ? ThreadId : extra);
}
}; // class ThreadPool
} // namespace concurrency
} // namespace onnxruntime
|
SpaceFrame v4.4.h | #include <Windows.h>
#include <ctype.h>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <math.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
using namespace std;
class SpaceFrame // Calculator of SpaceFrame
{
private:
double EPS;
double MAXTS;
double MAXLV;
int TNN; // total number of nodes
int NFIN; // number of fixed nodes
int NFRN; // number of free nodes
int NOR; // number of rods
int NOL; // number of loads
int NOS; // number of sections
struct Node // parameters of nodes
{
double XCN; // X coordinate of nodes
double YCN; // Y coordinate of nodes
double ZCN; // Z coordinate of nodes
} * nodes; // parameters of nodes
struct Rod // parameters of rods
{
int ENR; // the end node number of rods
int BNR; // the beginning node number of rods
double ELASTIC; // elastic modulus
double SHEAR; // shear modulus
double AREA; // area
double IMY; // inertia moment of Y axis
double IMZ; // inertia moment of Z axis
double THETA; // theta the deflection angle of main inertia axis
double LCS[4]; // the length, sine and cosine of rods
double RFE[6]; // the reaction force of the end node
} * rods; // parameters of rods
struct Load // parameters of loads
{
int NRL; // the number of rods with load
int PLI; // the plane of the load's in
int KOL; // the kind of load
double VOL; // the value of load
double DLB; // the distance between load and the beginning node
} * loads; // parameters of loads
struct Section // parameters of sections
{
int NRS; // the number of rod with section
double DSB; // the distance between section and the beginning node
double IFS[6]; // the internal force in the section
} * sections; // parameters of sections
double *TotalStiffness; // total stiffness
double *LoadVector; // load vector
double *Displacement; // displacement of nodes
int *IV; // the location of diagonal element
int NSI; // upper limit
int MAXIBDW; // half bandwidth
bool ProgressBar; // open progress bar
bool Parallel; // open parallel
int status;
// calculate the length sine and cosine of rods
bool sfLCosSin()
{
// For each rod, store its direction vector in LCS[1..3], its length in
// LCS[0], then normalise the vector into direction cosines.
for (int r = 0; r < NOR; r++)
{
const int b = rods[r].BNR - 1; // beginning-node index
const int e = rods[r].ENR - 1; // end-node index
rods[r].LCS[1] = nodes[e].XCN - nodes[b].XCN;
rods[r].LCS[2] = nodes[e].YCN - nodes[b].YCN;
rods[r].LCS[3] = nodes[e].ZCN - nodes[b].ZCN;
rods[r].LCS[0] = sqrt(rods[r].LCS[1] * rods[r].LCS[1] + rods[r].LCS[2] * rods[r].LCS[2] + rods[r].LCS[3] * rods[r].LCS[3]);
if (rods[r].LCS[0] < EPS) // degenerate rod: both ends coincide
return sfPrintError(8);
for (int axis = 1; axis <= 3; axis++)
rods[r].LCS[axis] /= rods[r].LCS[0];
}
return 0;
}
// allocate total stiffness matrix, load vector and displacement vector
bool sfAllocate()
{
// Computes the skyline (variable-band) storage layout of the symmetric total
// stiffness matrix, then allocates it plus the load and displacement
// vectors. Only free nodes (node number > NFIN) carry degrees of freedom.
int it = 0, mm = 0, dof = 6 * NFRN, *peribdw = new int[TNN](); // bandwidth per line in total stiffness matrix
IV = new int[dof]();
for (int i = 0; i < NOR; i++) // for each rod
{
if (rods[i].BNR > NFIN)
{
mm = rods[i].ENR - rods[i].BNR; // bandwidth is end number minus begin number
if (mm > peribdw[rods[i].ENR - 1])
peribdw[rods[i].ENR - 1] = mm; // find the maximum bandwidth per line
}
}
for (int i = NFIN; i < TNN; i++) // for each line in total stiffness matrix
{
if (peribdw[i] > MAXIBDW) // find maximum node-level bandwidth
MAXIBDW = peribdw[i];
// IV[d] is the 1-based position of DOF d's diagonal element in the packed
// TotalStiffness array; row d stores 6*peribdw[i]+j entries (its band).
for (int j = 1; j <= 6; j++)
{
it = it + 1;
if (it == 1)
IV[it - 1] = 6 * peribdw[i] + j;
else
IV[it - 1] = IV[it - 2] + 6 * peribdw[i] + j;
}
}
MAXIBDW = 6 * MAXIBDW + 5; // node bandwidth -> DOF half bandwidth
NSI = IV[dof - 1]; // total number of stored (packed) entries
delete[] peribdw;
TotalStiffness = new double[NSI](); // allocate memory for total stiffness matrix
LoadVector = new double[dof](); // allocate memory for load vector
Displacement = new double[dof](); // allocate memory for displacement vector
return 0;
}
// build total stiffness matrix
bool sfBuildTotalStiff() // assembles the packed (skyline) total stiffness matrix
{
// Superposes every rod's unit stiffness blocks into TotalStiffness:
// k11/k22 for each free end, plus the k21 coupling block when both ends are
// free. Afterwards MAXTS records the largest |entry| for solver scaling.
double us[36] = {0}; // unit stiffness matrix
int p[2] = {0}; // first DOF index of each rod end; negative for fixed nodes
for (int k = 0; k < NOR; k++)
{
p[0] = 6 * (rods[k].BNR - NFIN - 1); // match the displacement with nods
p[1] = 6 * (rods[k].ENR - NFIN - 1);
for (int i = 0; i < 2; i++)
{
if (p[i] >= 0) // determine free node
{
if (sfBuildUnitStiff(k, i + 1, us)) // build unit stiffness matrix (k11 or k22)
return sfPrintError(7);
for (int m = 0; m < 6; m++)
for (int n = 0; n <= m; n++) // lower triangle only (matrix is symmetric)
TotalStiffness[IV[(p[i] + m)] + (p[i] + n) - (p[i] + m) - 1] += us[m * 6 + n]; // superpose
}
}
if (p[0] >= 0 && p[1] >= 0) // both ends free: add the coupling block
{
if (sfBuildUnitStiff(k, 3 + 1, us)) // build unit stiffness matrix (k21)
return sfPrintError(7);
for (int m = 0; m < 6; m++)
for (int n = 0; n < 6; n++)
TotalStiffness[IV[(p[1] + m)] + (p[0] + n) - (p[1] + m) - 1] += us[m * 6 + n]; // superpose
}
}
for (int i = 0; i < NSI; i++)
if (fabs(TotalStiffness[i]) > MAXTS)
MAXTS = fabs(TotalStiffness[i]); // fix: was `= TotalStiffness[i]` — a negative extreme made MAXTS negative and corrupted the A/MAXTS scaling in the CG solver
return 0;
}
// build unit stiffness matrix
bool sfBuildUnitStiff(int k, int flag, double *us) // k = rod index, flag selects the sub-matrix (1..4), us receives the 6x6 result
{
// Computes us = T * rd * T^T: the selected local stiffness block rotated
// into global coordinates via the rod's transformation matrix T.
if (k < 0 || flag < 1 || flag > 4 || us == NULL)
return sfPrintError(16);
double rd[36] = {0}, t[36] = {0}, c[36] = {0}; // local stiffness, transformation, intermediate product
memset(us, 0, 36 * sizeof(double));
if (sfBuildLocalStiff(k, flag, rd)) // local stiffness block
return sfPrintError(9);
if (sfBuildTrans(k, t)) // transformation matrix
return sfPrintError(10);
for (int i = 0; i < 6; i++) // c = T * rd
for (int j = 0; j < 6; j++)
{
double sum = 0;
for (int m = 0; m < 6; m++)
sum += t[i * 6 + m] * rd[m * 6 + j];
c[i * 6 + j] = sum;
}
for (int i = 0; i < 6; i++) // us = c * T^T
for (int j = 0; j < 6; j++)
{
double sum = 0;
for (int m = 0; m < 6; m++)
sum += c[i * 6 + m] * t[j * 6 + m];
us[i * 6 + j] = sum;
}
return 0;
}
// build local stiffness matrix
// Fills rd (6x6, row-major) with one block of the rod's local stiffness
// matrix: flag 1 => k11, 2 => k22, 3 => k12, 4 => k21.
bool sfBuildLocalStiff(int k, int flag, double *rd) // k is the 0-based rod index, flag selects the block
{
if (k < 0 || flag < 0 || flag > 4 || rd == NULL)
return sfPrintError(17);
double a = 0, b = 0, c = 0, d = 0, e = 0, f = 0, g = 0, h = 0, l = rods[k].LCS[0];
a = rods[k].ELASTIC * rods[k].AREA / l; // EA/l (axial)
b = rods[k].SHEAR * (rods[k].IMY + rods[k].IMZ) / l; // GJ(p)/l (torsion)
c = 4 * rods[k].ELASTIC * rods[k].IMY / l; // 4EJ(y)/l
d = c / 2 * 3 / l; // 6EJ(y)/l/l
e = 2 * d / l; // 12EJ(y)/l/l/l
f = 4 * rods[k].ELASTIC * rods[k].IMZ / l; // 4EJ(z)/l
g = f / 2 * 3 / l; // 6EJ(z)/l/l
h = 2 * g / l; // 12EJ(z)/l/l/l
switch (flag)
{
case 1: // k11
rd[0 * 6 + 0] = a;
rd[1 * 6 + 1] = h;
rd[1 * 6 + 5] = rd[5 * 6 + 1] = g;
rd[2 * 6 + 2] = e;
rd[2 * 6 + 4] = rd[4 * 6 + 2] = -d;
rd[3 * 6 + 3] = b;
rd[4 * 6 + 4] = c;
rd[5 * 6 + 5] = f;
break;
case 2: // k22
rd[0 * 6 + 0] = a;
rd[1 * 6 + 1] = h;
rd[1 * 6 + 5] = rd[5 * 6 + 1] = -g;
rd[2 * 6 + 2] = e;
rd[2 * 6 + 4] = rd[4 * 6 + 2] = d;
rd[3 * 6 + 3] = b;
rd[4 * 6 + 4] = c;
rd[5 * 6 + 5] = f;
break;
case 3: // k12 (off-diagonal block: not symmetric in g/d signs)
rd[0 * 6 + 0] = -a;
rd[1 * 6 + 1] = -h;
rd[1 * 6 + 5] = g;
rd[5 * 6 + 1] = -g;
rd[2 * 6 + 2] = -e;
rd[2 * 6 + 4] = -d;
rd[4 * 6 + 2] = d;
rd[3 * 6 + 3] = -b;
rd[4 * 6 + 4] = c / 2;
rd[5 * 6 + 5] = f / 2;
break;
case 4: // k21
rd[0 * 6 + 0] = -a;
rd[1 * 6 + 1] = -h;
rd[1 * 6 + 5] = -g;
rd[5 * 6 + 1] = g;
rd[2 * 6 + 2] = -e;
rd[2 * 6 + 4] = d;
rd[4 * 6 + 2] = -d;
rd[3 * 6 + 3] = -b;
rd[4 * 6 + 4] = c / 2;
rd[5 * 6 + 5] = f / 2;
break;
default:
// NOTE(review): flag == 0 passes validation but leaves rd untouched — confirm intended.
break;
}
return 0;
}
// build transpose matrix
bool sfBuildTrans(int k, double *t) // k is the 0-based rod index, t receives the 6x6 transformation matrix
{
// Builds the coordinate transformation matrix from the rod's direction
// cosines (LCS[1..3]) and its principal-axis rotation THETA. Vertical rods
// (|cos gamma| ~ 1) use special-cased forms because sin(gamma) vanishes.
if (k < 0 || t == NULL)
return sfPrintError(18);
double coa = 0, cob = 0, coc = 0, sic = 0, sit = 0, cot = 0, m = 0, n = 0; // co = cosine, si = sine; m, n are temporaries
memset(t, 0, 36 * sizeof(double));
coa = rods[k].LCS[1]; // cosine alpha
cob = rods[k].LCS[2]; // cosine beta
coc = rods[k].LCS[3]; // cosine gama
sit = sin(rods[k].THETA); // sine theta
cot = cos(rods[k].THETA); // cosine theta
if (fabs(coc - 1) < EPS) // vertical rod (z axis positive direction)
{
t[2 * 6 + 0] = t[5 * 6 + 3] = 1;
t[0 * 6 + 1] = t[3 * 6 + 4] = t[1 * 6 + 2] = t[4 * 6 + 5] = sit;
t[1 * 6 + 1] = t[4 * 6 + 4] = cot;
t[0 * 6 + 2] = t[3 * 6 + 5] = -cot;
}
else if (fabs(coc + 1) < EPS) // vertical rod (z axis negative direction)
{
t[2 * 6 + 0] = t[5 * 6 + 3] = -1;
t[0 * 6 + 1] = t[3 * 6 + 4] = sit;
t[1 * 6 + 2] = t[4 * 6 + 5] = -sit;
t[1 * 6 + 1] = t[4 * 6 + 4] = t[0 * 6 + 2] = t[3 * 6 + 5] = cot;
}
else
{
sic = sqrt(1 - coc * coc); // sine gama
m = coa * coc; // cosine alpha times cosine gama
n = cob * coc; // cosine beta times cosine gama
t[0 * 6 + 0] = t[3 * 6 + 3] = coa;
t[1 * 6 + 0] = t[4 * 6 + 3] = cob;
t[2 * 6 + 0] = t[5 * 6 + 3] = coc;
t[0 * 6 + 1] = t[3 * 6 + 4] = (cob * sit - m * cot) / sic;
t[1 * 6 + 1] = t[4 * 6 + 4] = -(n * cot + coa * sit) / sic;
t[2 * 6 + 1] = t[5 * 6 + 4] = cot * sic;
t[0 * 6 + 2] = t[3 * 6 + 5] = (m * sit + cob * cot) / sic; // fix: was `t[0 * 2 + 2]` — same element (index 2) by coincidence, normalised to the 6-column stride used everywhere else
t[1 * 6 + 2] = t[4 * 6 + 5] = (n * sit - coa * cot) / sic;
t[2 * 6 + 2] = t[5 * 6 + 5] = -sit * sic;
}
return 0;
}
// build load vector
bool sfBuildLoadVector() // assembles the global load vector
{
// Converts every member load to equivalent nodal loads: fixed-end reactions
// accumulate on the rod (RFE) and their opposites, rotated to global axes,
// are added to LoadVector. MAXLV then records the largest |entry| for
// solver scaling.
int rod = 0, p[2] = {0}; // rod index; first DOF index of each rod end
double rf[12] = {0}, t[36] = {0}; // rf = begin/end reaction forces, t = transformation matrix
for (int i = 0; i < NOL; i++)
{
rod = loads[i].NRL - 1; // the number of rods with load
memset(rf, 0, 12 * sizeof(double)); // zero clearing
if (sfReactionForce(i, &rf[0 * 6], &rf[1 * 6])) // calculate reaction force
return sfPrintError(11);
for (int j = 0; j < 6; j++) // add reaction force to RFE
rods[rod].RFE[j] += rf[1 * 6 + j];
if (sfBuildTrans(rod, t)) // build transpose matrix
return sfPrintError(10);
p[0] = 6 * (rods[rod].BNR - NFIN - 1); // match the displacement with nods
p[1] = 6 * (rods[rod].ENR - NFIN - 1);
for (int j = 0; j < 2; j++) // add reaction force to load vector
if (p[j] >= 0) // determine free node
for (int m = 0; m < 6; m++)
for (int n = 0; n < 6; n++)
LoadVector[p[j] + m] -= t[m * 6 + n] * rf[j * 6 + n];
}
for (int i = 0; i < 6 * NFRN; i++)
if (fabs(LoadVector[i]) > MAXLV)
MAXLV = fabs(LoadVector[i]); // fix: was `= LoadVector[i]` — a negative extreme made MAXLV negative and corrupted the b/MAXLV scaling in the CG solver
return 0;
}
// calculate reaction force
bool sfReactionForce(int i, double *rfb, double *rfe) // i is the load index; rfb/rfe receive the fixed-end reactions at the beginning/end of the rod
{
// Computes the fixed-end (support) reactions of the loaded rod for load i.
// The handbook moment formulas are positive clockwise; t (+/-1 by load
// plane) converts them to the coordinate-axis sign convention.
if (i < 0 || rfb == NULL || rfe == NULL)
return sfPrintError(20);
double ra = 0, rb = 0, a = 0, b = 0, q = loads[i].VOL, xq = loads[i].DLB; // ra, rb, a and b are middle variables
int rod = loads[i].NRL - 1, pm = loads[i].PLI, t = 0; // rod is the 0-based rod index
if (pm == 0) // load is in XY plane
t = -1;
else if (pm == 1) // load is in XZ plane
t = 1;
ra = loads[i].DLB / rods[rod].LCS[0]; // x(q) / L
rb = 1 - ra; // 1 - x(q) / L
switch (loads[i].KOL)
{
case 1: // vertical concentrating load (dead store `a = rb * rb` removed)
rfb[pm + 1] = -q * rb * (1 + ra - 2 * ra * ra);
rfe[pm + 1] = -q - rfb[pm + 1];
rfb[5 - pm] = t * q * rb * ra * (rods[rod].LCS[0] - xq);
rfe[5 - pm] = -t * q * ra * rb * xq;
break;
case 2: // vertical uniform load
a = q * xq;
b = a * xq / 12;
rfb[pm + 1] = -a * (1 + 0.5 * ra * ra * ra - ra * ra);
rfe[pm + 1] = -a - rfb[pm + 1];
rfb[5 - pm] = t * b * (6 - 8 * ra + 3 * ra * ra);
rfe[5 - pm] = -t * b * (4 * ra - 3 * ra * ra);
break;
case 3: // axial concentrating force when PLI == 0, torque when PLI == 1
rfb[3 * pm] = -q * rb;
rfe[3 * pm] = -q * ra;
break;
case 4: // axial uniform load
a = q * xq;
rfe[3 * pm] = -a * ra / 2;
rfb[3 * pm] = -a - rfe[3 * pm];
break;
case 5: // vertical triangle distributed load
a = q * xq / 2;
b = -0.4 * ra * ra;
rfb[pm + 1] = -2 * a * (0.5 - 0.75 * ra * ra + 0.4 * ra * ra * ra);
rfe[pm + 1] = -a - rfb[pm + 1];
rfb[5 - pm] = t * a * (2.0 / 3 + b - ra); // fix: was `2 / 3`, integer division that evaluates to 0 and dropped the 2/3 term
rfe[5 - pm] = -t * a * (0.5 * ra + b);
break;
case 6: // concentrating bending moment
rfb[2 - pm] = t * 6 * q * rb * ra / rods[rod].LCS[0];
rfe[2 - pm] = -rfb[2 - pm];
rfb[pm + 4] = t * q * rb * (-1 + 3 * ra);
rfe[pm + 4] = t * q * ra * (2 - 3 * ra);
break;
case 7: // uniform temperature rise
rfb[0] = q * xq * rods[rod].ELASTIC * rods[rod].AREA;
rfe[0] = -rfb[0];
break;
case 8: // different temperature rise
if (pm == 0)
a = rods[rod].IMZ;
else if (pm == 1)
a = rods[rod].IMY;
rfb[5 - pm] = t * q * 2 * rods[rod].ELASTIC * a * xq;
rfe[5 - pm] = -rfb[5 - pm];
break;
default:
break;
}
return 0;
}
// solve equation of matrix by conjugate gradient
bool sfConjugateGradient(double *A, double *b, double *x, int N)
{
// Solves A*x = b for the skyline-packed symmetric matrix A (diagonal
// positions in IV) with the conjugate-gradient method. A and b are scaled
// by MAXTS/MAXLV for conditioning and restored before returning.
// Returns 0 on success, 1 (via sfPrintError) on failure.
if (A == NULL || b == NULL || x == NULL || N == 0)
return sfPrintError(12);
double *r = NULL, *p = NULL, *z = NULL;
double gamma = 0, gamma_new = 0, gamma_new_sqrt = 0, alpha = 0, beta = 0;
int percent = 0, percent_new = 0;
if (ProgressBar)
cout << "\rSolving equation [ 0% ][ ]"; // fix: was "0%%" — "%%" is a printf escape, operator<< prints both characters
r = (double *)malloc(N * sizeof(double));
p = (double *)malloc(N * sizeof(double));
z = (double *)malloc(N * sizeof(double));
if (r == NULL || p == NULL || z == NULL) // fix: allocations were unchecked; also the old memset(ptr, 0, sizeof(double)) cleared only one element — the vectors are fully written below, so the memsets are dropped
{
free(r);
free(p);
free(z);
return sfPrintError(12);
}
for (int i = 0; i < NSI; i++)
A[i] = A[i] / MAXTS;
for (int i = 0; i < N; i++)
b[i] = b[i] / MAXLV;
// x = [0 ... 0]
// r = b - A * x
// p = r
// gamma = r' * r
gamma = 0.0;
for (int i = 0; i < N; ++i)
{
x[i] = 0.0;
r[i] = b[i];
p[i] = r[i];
gamma += r[i] * r[i];
}
for (int n = 0; 1; ++n)
{
// z = A * p (skyline storage: an off-diagonal (i,j) exists only inside the band)
for (int i = 0; i < N; i++)
{
z[i] = 0.0;
for (int j = 0; j < N; j++)
{
if (i == j)
{
z[i] += A[IV[i] - 1] * p[j];
}
else if (j > i)
{
if ((IV[j] - j + i) > IV[j - 1])
z[i] += A[IV[j] - j + i - 1] * p[j];
}
else
{
if ((IV[i] - i + j) > IV[i - 1])
z[i] += A[IV[i] - i + j - 1] * p[j];
}
}
}
// alpha = gamma / (p' * z)
alpha = 0.0;
for (int i = 0; i < N; ++i)
alpha += p[i] * z[i];
alpha = gamma / alpha;
// x = x + alpha * p
// r = r - alpha * z
// gamma_new = r' * r
gamma_new = 0.0;
for (int i = 0; i < N; ++i)
{
x[i] += alpha * p[i];
r[i] -= alpha * z[i];
gamma_new += r[i] * r[i];
}
gamma_new_sqrt = sqrt(gamma_new);
if (gamma_new_sqrt < EPS) // converged
break;
if (ProgressBar)
{
percent_new = (int)((1 - log10(gamma_new_sqrt / EPS) / 16) * 100);
if (percent_new > percent)
{
percent = percent_new;
cout << "\rSolving equation ";
for (int i = 0; i <= 4; i++)
if (i <= n % 4)
cout << ".";
else
cout << " ";
cout << "[ " << percent << "% ]"; // fix: was "%%"
cout << "[";
for (int i = 0; i < 49; i++)
if (i < percent / 2)
cout << "=";
else
cout << " ";
cout << "]";
}
else
{
cout << "\rSolving equation ";
for (int i = 0; i <= 4; i++)
if (i <= n % 4)
cout << ".";
else
cout << " ";
}
}
beta = gamma_new / gamma;
// p = r + (gamma_new / gamma) * p;
for (int i = 0; i < N; ++i)
p[i] = r[i] + beta * p[i];
// gamma = gamma_new
gamma = gamma_new;
}
// undo the scaling applied above
for (int i = 0; i < NSI; i++)
A[i] = A[i] * MAXTS;
for (int i = 0; i < N; i++)
b[i] = b[i] * MAXLV;
for (int i = 0; i < N; i++)
x[i] = x[i] * MAXLV / MAXTS;
if (ProgressBar)
cout << "\rSolving equation done [ 100% ][=================================================]\n"; // fix: was "100%%"
free(r);
free(p);
free(z);
return 0;
}
// solve equation of matrix by conjugate gradient parallel
bool sfConjugateGradientPar(double *A, double *b, double *x, int N)
{
// OpenMP-parallel variant of sfConjugateGradient; same algorithm and
// scaling, with the vector loops parallelised (reductions on the dot
// products). Returns 0 on success, 1 (via sfPrintError) on failure.
if (A == NULL || b == NULL || x == NULL || N == 0)
return sfPrintError(12);
double *r = NULL, *p = NULL, *z = NULL;
double gamma = 0, gamma_new = 0, gamma_new_sqrt = 0, alpha = 0, beta = 0;
int percent = 0, percent_new = 0;
if (ProgressBar)
cout << "\rSolving equation [ 0% ][ ]"; // fix: was "0%%" — operator<< prints "%%" verbatim
r = (double *)malloc(N * sizeof(double));
p = (double *)malloc(N * sizeof(double));
z = (double *)malloc(N * sizeof(double));
if (r == NULL || p == NULL || z == NULL) // fix: allocations were unchecked; the single-element memsets were dropped (vectors are fully written below)
{
free(r);
free(p);
free(z);
return sfPrintError(12);
}
for (int i = 0; i < NSI; i++)
A[i] = A[i] / MAXTS;
for (int i = 0; i < N; i++)
b[i] = b[i] / MAXLV;
// x = [0 ... 0]
// r = b - A * x
// p = r
// gamma = r' * r
gamma = 0.0;
#pragma omp parallel for reduction(+ \
: gamma)
for (int i = 0; i < N; ++i)
{
x[i] = 0.0;
r[i] = b[i];
p[i] = r[i];
gamma += r[i] * r[i];
}
for (int n = 0; true; ++n)
{
// z = A * p (skyline storage: an off-diagonal (i,j) exists only inside the band)
#pragma omp parallel for
for (int i = 0; i < N; i++)
{
z[i] = 0.0;
for (int j = 0; j < N; j++)
{
if (i == j)
{
z[i] += A[IV[i] - 1] * p[j];
}
else if (j > i)
{
if ((IV[j] - j + i) > IV[j - 1])
z[i] += A[IV[j] - j + i - 1] * p[j];
}
else
{
if ((IV[i] - i + j) > IV[i - 1])
z[i] += A[IV[i] - i + j - 1] * p[j];
}
}
}
// alpha = gamma / (p' * z)
alpha = 0.0;
#pragma omp parallel for reduction(+ \
: alpha)
for (int i = 0; i < N; ++i)
alpha += p[i] * z[i];
alpha = gamma / alpha;
// x = x + alpha * p
// r = r - alpha * z
// gamma_new = r' * r
gamma_new = 0.0;
#pragma omp parallel for reduction(+ \
: gamma_new)
for (int i = 0; i < N; ++i)
{
x[i] += alpha * p[i];
r[i] -= alpha * z[i];
gamma_new += r[i] * r[i];
}
gamma_new_sqrt = sqrt(gamma_new);
if (gamma_new_sqrt < EPS) // converged
break;
if (ProgressBar)
{
percent_new = (int)((1 - log10(gamma_new_sqrt / EPS) / 16) * 100); // consistency fix: was `* 1e15`, which hard-coded the default EPS instead of using the member
if (percent_new > percent)
{
percent = percent_new;
cout << "\rSolving equation ";
for (int i = 0; i <= 4; i++)
if (i <= n % 4)
cout << ".";
else
cout << " ";
cout << "[ " << percent << "% ]"; // fix: was "%%"
cout << "[";
for (int i = 0; i < 49; i++)
if (i < percent / 2)
cout << "=";
else
cout << " ";
cout << "]";
}
else
{
cout << "\rSolving equation ";
for (int i = 0; i <= 4; i++)
if (i <= n % 4)
cout << ".";
else
cout << " ";
}
}
beta = gamma_new / gamma;
// p = r + (gamma_new / gamma) * p;
#pragma omp parallel for
for (int i = 0; i < N; ++i)
p[i] = r[i] + beta * p[i];
// gamma = gamma_new
gamma = gamma_new;
}
// undo the scaling applied above
for (int i = 0; i < NSI; i++)
A[i] = A[i] * MAXTS;
for (int i = 0; i < N; i++)
b[i] = b[i] * MAXLV;
for (int i = 0; i < N; i++)
x[i] = x[i] * MAXLV / MAXTS;
if (ProgressBar)
cout << "\rSolving equation done [ 100% ][=================================================]\n"; // fix: was "100%%"
free(r);
free(p);
free(z);
return 0;
}
// calculate internal force of rods
bool sfInternalForce(int mm, int k, double xp) // mm is the 0-based section index, k is the 1-based rod number, xp is the distance between the section and the beginning of the rod
{
// Computes the six internal-force components at the section: start from the
// rod-end reactions (RFE), add the cantilever contribution of every load on
// rod k, then add the forces induced by the solved node displacements.
if (mm < 0 || k < 0)
return sfPrintError(21);
double tf[6] = {0}; // tf is temporary force vector
sections[mm].IFS[0] = +rods[k - 1].RFE[0]; // calculate internal force cause by reaction force at the end of rods
sections[mm].IFS[1] = -rods[k - 1].RFE[1];
sections[mm].IFS[2] = -rods[k - 1].RFE[2];
sections[mm].IFS[3] = +rods[k - 1].RFE[3];
sections[mm].IFS[4] = -rods[k - 1].RFE[4] + rods[k - 1].RFE[2] * (rods[k - 1].LCS[0] - xp);
sections[mm].IFS[5] = +rods[k - 1].RFE[5] + rods[k - 1].RFE[1] * (rods[k - 1].LCS[0] - xp);
for (int i = 0; i < NOL; i++) // for every load
if (loads[i].NRL == k) // if load is on rod k
{
memset(tf, 0, 6 * sizeof(double)); // zero clear tf
if (sfCtlInternalForce(i, xp, tf)) // calculate internal force of cantilever beam
return sfPrintError(13);
for (int j = 0; j < 6; j++) // add internal force of cantilever into IFS
sections[mm].IFS[j] += tf[j];
}
if (sfDisplacementForce(k, tf)) // calculate end force from node displacements
return sfPrintError(14);
sections[mm].IFS[0] -= tf[0]; // calculate section force cause by end force
sections[mm].IFS[1] += tf[1];
sections[mm].IFS[2] += tf[2];
sections[mm].IFS[3] -= tf[3];
sections[mm].IFS[4] += tf[4] + tf[2] * xp;
sections[mm].IFS[5] -= tf[5] - tf[1] * xp;
return 0;
}
// calculate internal force of cantilever beam
bool sfCtlInternalForce(int i, double xp, double *tf) // i is the load index, xp is the distance between the section and the beginning of the rod, tf receives the internal forces
{
// Internal force at distance xp of an equivalent cantilever beam carrying
// only load i; a load contributes only when the section lies before it
// (xp < xq). KOL codes match sfReactionForce (1..8).
if (i < 0 || tf == NULL)
return sfPrintError(22);
// NOTE(review): r = xp / xq divides by DLB — assumes xq != 0 for the load
// kinds that use r; confirm the input guarantees this.
double xq = loads[i].DLB, t = xq - xp, r = xp / xq, q = loads[i].VOL; // t and r are temporary variables
int e = loads[i].PLI; // load plane (0 = XY, 1 = XZ)
switch (loads[i].KOL) // calculate section force according to kind of loads
{
case 1: // concentrating load
if (xp < xq)
{
tf[e + 1] = -q;
tf[5 - e] = q * t;
}
break;
case 2: // uniform load
if (xp < xq)
{
tf[e + 1] = -q * t;
tf[5 - e] = 0.5 * q * t * t;
}
break;
case 3: // axial force / torque
if (xp < xq)
tf[3 * e] = q;
break;
case 4: // axial uniform load
if (xp < xq)
tf[3 * e] = q * t;
break;
case 5: // triangle distributed load
if (xp < xq)
{
tf[e + 1] = -q * (1 + r) * t / 2;
tf[5 - e] = q * t * t * (2 + r) / 6;
}
break;
case 6: // concentrating bending moment
if (xp < xq)
tf[e + 4] = (2 * e - 1) * q; // sign flips with the load plane: -q for e==0, +q for e==1
break;
case 7: // temperature change don't generate internal force on cantilever beam
break;
case 8:
break;
default:
break;
}
return 0;
}
// calculate internal force of displacement
bool sfDisplacementForce(int k, double *tref) // k is the 1-based rod number, tref receives the rod-end forces
{
// Computes the end forces of rod k caused by the solved node displacements:
// tref += (rd * T^T) * Displacement for each free end (k11 for the
// beginning node, k12 for the end node). Fixed nodes contribute nothing.
if (k < 1 || tref == NULL)
return sfPrintError(23);
int p[2] = {0}; // first DOF index of each rod end (negative for fixed nodes)
double rd[36] = {0}, rdb[36] = {0}, t[36] = {0};
memset(tref, 0, 6 * sizeof(double));
if (sfBuildTrans(k - 1, t)) // transformation matrix
return sfPrintError(10);
p[0] = 6 * (rods[k - 1].BNR - NFIN - 1);
p[1] = 6 * (rods[k - 1].ENR - NFIN - 1);
for (int end = 0; end < 2; end++)
{
if (p[end] < 0)
continue; // fixed node: zero displacement, no contribution
if (sfBuildLocalStiff(k - 1, 2 * end + 1, rd)) // k11 (end==0) or k12 (end==1)
return sfPrintError(9);
memset(rdb, 0, 36 * sizeof(double));
for (int j = 0; j < 6; j++) // rdb = rd * T^T
for (int m = 0; m < 6; m++)
for (int n = 0; n < 6; n++)
rdb[j * 6 + m] += rd[j * 6 + n] * t[m * 6 + n];
for (int j = 0; j < 6; j++) // tref += rdb * displacement of this end
for (int m = 0; m < 6; m++)
tref[j] += rdb[j * 6 + m] * Displacement[p[end] + m];
}
return 0;
}
// print"----------------------------------------"
bool sfPrintLine()
{
// Prints a dashed separator line used between output sections; returns 0.
cout << "-------------------------------------------------------------------------------------------------------------------------------\n";
return 0;
}
// print"****************************************"
bool sfPrintLine2()
{
// Prints an asterisk separator line used as a heavier divider; returns 0.
cout << "**************************************************************************\n";
return 0;
}
// print error
bool sfPrintError(int error)
{
cout << "ERROR:\t";
switch (error)
{
case 1:
cout << "Data input failed!\n";
break;
case 2:
cout << "Building total stiffness matrix failed!\n";
break;
case 3:
cout << "Building load vector failed!\n";
break;
case 4:
cout << "Solving equation failed!\n";
break;
case 5:
cout << "Calculating internal force failed!\n";
break;
case 6:
cout << "Calculating length, cosine and sine failed!\n";
break;
case 7:
cout << "Building unit stiffness matrix failed!\n";
break;
case 8:
cout << "The length of a rod is too small!\n";
break;
case 9:
cout << "Building local stiffness matrix filed!\n";
break;
case 10:
cout << "Building transpose matrix failed!\n";
break;
case 11:
cout << "Calculating reaction force failed!\n";
break;
case 12:
cout << "There is something wrong in the equation!\n";
break;
case 13:
cout << "calculating internal force of cantilever beam failed!\n";
break;
case 14:
cout << "Calculating end force failed!\n";
break;
case 15:
cout << "Allocating total stiffness matrix failed!\n";
break;
case 16:
cout << "There is something wrong in building unit stiffness matrix!\n";
break;
case 17:
cout << "There is something wrong in building local stiffness matrix!\n";
break;
case 18:
cout << "There is something wrong in building transpose matrix failed!\n";
break;
case 19:
cout << "There is something wrong in building load vector!\n";
break;
case 20:
cout << "There is something wrong in calculating reaction force!\n";
break;
case 21:
cout << "There is something wrong in calculating internal force!\n";
break;
case 22:
cout << "There is something wrong in calculating internal force of cantilever!\n";
break;
case 23:
cout << "There is something wrong in calculating internal force of displacement!\n";
break;
case 24:
cout << "There is no such file!\n";
break;
case 25:
cout << "!\n";
break;
default:
break;
}
status = 4; //error
return 1;
}
// print input error
bool sfPrintError(int row, int column)
{
if (column == 1)
cout << "Error! row: " << row << " column: 1 : head is mismathced!\n";
else
cout << "Error! row: " << row << " column: " << column << " : data input failed!\n";
status = 4; // error
return 1;
}
public:
SpaceFrame();
SpaceFrame(SpaceFrame &);
~SpaceFrame();
// read data from .csv
bool sfInput();
// calculate
bool sfCalculate(bool, bool, double);
// output data
bool sfOutput(bool);
// create circular structure
bool sfCircularStructure(int, int, int);
};
// Default constructor: empty frame — all counters zeroed, all arrays null,
// progress bar and parallel solving enabled.
SpaceFrame::SpaceFrame()
{
EPS = 1e-15;
MAXTS = 0;
MAXLV = 0;
TNN = 0; // total number of nodes
NFIN = 0; // number of fixed nodes
NFRN = 0; // number of free nodes
NOR = 0; // number of rods
NOL = 0; // number of loads
NOS = 0; // number of sections
nodes = NULL; // parameters of nodes
rods = NULL; // parameters of rods
loads = NULL; // parameters of loads
sections = NULL; // parameters of sections
TotalStiffness = NULL; // total stiffness
LoadVector = NULL; // load vector
Displacement = NULL; // the displacement of nodes
IV = NULL; // the location of diagonal element
NSI = 0; // upper limit
MAXIBDW = 0; // half bandwidth
ProgressBar = 1; // open progress bar
Parallel = 1; // open parallel
status = 0; // initialization is completed
}
SpaceFrame::SpaceFrame(SpaceFrame &Frame)
{
// Copy constructor. Fix: every pointer member is pre-set to NULL first.
// Previously, in the deep-copy branch a member such as `nodes` stayed
// uninitialised whenever the corresponding source pointer was NULL, and the
// destructor then ran delete[] on an indeterminate pointer (undefined behavior).
nodes = NULL;
rods = NULL;
loads = NULL;
sections = NULL;
TotalStiffness = NULL;
LoadVector = NULL;
Displacement = NULL;
IV = NULL;
if (Frame.status == 0 || Frame.status == 4) // source empty or failed: start fresh
{
status = 0; // initialization is completed
EPS = Frame.EPS;
MAXTS = 0;
MAXLV = 0;
TNN = 0; // total number of nodes
NFIN = 0; // number of fixed nodes
NFRN = 0; // number of free nodes
NOR = 0; // number of rods
NOL = 0; // number of loads
NOS = 0; // number of sections
NSI = 0; // upper limit
MAXIBDW = 0; // half bandwidth
ProgressBar = Frame.ProgressBar;
Parallel = Frame.Parallel;
}
else // deep-copy a populated frame
{
status = Frame.status;
EPS = Frame.EPS;
MAXTS = Frame.MAXTS;
MAXLV = Frame.MAXLV;
TNN = Frame.TNN;
NFIN = Frame.NFIN;
NFRN = Frame.NFRN;
NOR = Frame.NOR;
NOL = Frame.NOL;
NOS = Frame.NOS;
if (Frame.nodes != NULL)
{
nodes = new Node[TNN]();
memcpy(nodes, Frame.nodes, TNN * sizeof(Node));
}
if (Frame.rods != NULL)
{
rods = new Rod[NOR]();
memcpy(rods, Frame.rods, NOR * sizeof(Rod));
}
if (Frame.loads != NULL)
{
loads = new Load[NOL]();
memcpy(loads, Frame.loads, NOL * sizeof(Load));
}
if (Frame.sections != NULL)
{
sections = new Section[NOS]();
memcpy(sections, Frame.sections, NOS * sizeof(Section));
}
int dof = 6 * NFRN;
if (Frame.IV != NULL)
{
IV = new int[dof]();
memcpy(IV, Frame.IV, dof * sizeof(int));
}
NSI = Frame.NSI;
MAXIBDW = Frame.MAXIBDW;
if (Frame.TotalStiffness != NULL)
{
TotalStiffness = new double[NSI]();
memcpy(TotalStiffness, Frame.TotalStiffness, NSI * sizeof(double));
}
if (Frame.LoadVector != NULL)
{
LoadVector = new double[dof]();
memcpy(LoadVector, Frame.LoadVector, dof * sizeof(double));
}
if (Frame.Displacement != NULL)
{
Displacement = new double[dof]();
memcpy(Displacement, Frame.Displacement, dof * sizeof(double));
}
ProgressBar = Frame.ProgressBar;
Parallel = Frame.Parallel;
}
}
// Destructor: releases every owned array and resets all counters.
// delete[] on a NULL pointer is a no-op, so partially-built frames are safe.
// NOTE(review): sfInput calls this destructor explicitly to reset the object
// before re-reading — the pointers are nulled here so that is well-defined.
SpaceFrame::~SpaceFrame()
{
MAXTS = 0;
MAXLV = 0;
TNN = 0;
NFIN = 0;
NFRN = 0;
NOR = 0;
NOL = 0;
NOS = 0;
NSI = 0;
MAXIBDW = 0;
delete[] nodes;
nodes = NULL;
delete[] rods;
rods = NULL;
delete[] loads;
loads = NULL;
delete[] sections;
sections = NULL;
delete[] TotalStiffness;
TotalStiffness = NULL;
delete[] LoadVector;
LoadVector = NULL;
delete[] Displacement;
Displacement = NULL;
delete[] IV;
IV = NULL;
status = 0; // initialization is completed
}
// Read the frame definition from "source&result/sf_test.csv".
// Expected layout: one header line (skipped), then 23 labelled rows
// ("TNN" ... "DSB"), each starting with its label followed by
// comma-separated values. Returns 0 on success; on any mismatch returns
// sfPrintError(row, column) to report the failing cell.
bool SpaceFrame::sfInput()
{
    // NOTE(review): explicitly invoking the destructor and then continuing
    // to use *this formally ends the object's lifetime; it only works here
    // because the destructor leaves every member in a reusable zeroed
    // state. Consider refactoring into a plain clear() helper — TODO confirm.
    if (status)
        this->~SpaceFrame();
    const int one = 1;
    // Table of expected rows: label plus, BY REFERENCE, the number of
    // values the row carries. The references alias TNN/NOR/NOL/NOS, so
    // counts parsed from the first rows drive the later, longer rows.
    struct Row
    {
        char head[10];
        const int &cnt;
    } rows[23] = {
        {"TNN", one},
        {"NFIN", one},
        {"NOR", one},
        {"NOL", one},
        {"NOS", one},
        {"XCN", TNN},
        {"YCN", TNN},
        {"ZCN", TNN},
        {"BNR", NOR},
        {"ENR", NOR},
        {"ELASTIC", NOR},
        {"SHEAR", NOR},
        {"AREA", NOR},
        {"IMY", NOR},
        {"IMZ", NOR},
        {"THETA", NOR},
        {"NRL", NOL},
        {"PLI", NOL},
        {"KOL", NOL},
        {"VOL", NOL},
        {"DLB", NOL},
        {"NRS", NOS},
        {"DSB", NOS}};
    int rowIndex = 0; // Reset the number of rows to zero
    char buf[10] = {0}; // buffer and data string
    ifstream fin("source&result/sf_test.csv", ios::in);
    if (!fin)
        return sfPrintError(24);
    rowIndex = 1;
    fin.ignore(1000000, '\n'); // skip first line
    // Rows 2..6: the five scalar counts (TNN, NFIN, NOR, NOL, NOS).
    // Each row is: label up to the first comma, then one integer.
    rowIndex = 2;
    fin.getline(buf, 10, ',');
    if (strcmp(rows[rowIndex - 2].head, buf))
        return sfPrintError(rowIndex, 1);
    for (int i = 0; i < rows[rowIndex - 2].cnt; i++)
        if (!(fin >> TNN))
            return sfPrintError(rowIndex, i + 1);
    fin.ignore(1000000, '\n');
    rowIndex = 3;
    fin.getline(buf, 10, ',');
    if (strcmp(rows[rowIndex - 2].head, buf))
        return sfPrintError(rowIndex, 1);
    for (int i = 0; i < rows[rowIndex - 2].cnt; i++)
        if (!(fin >> NFIN))
            return sfPrintError(rowIndex, i + 1);
    fin.ignore(1000000, '\n');
    rowIndex = 4;
    fin.getline(buf, 10, ',');
    if (strcmp(rows[rowIndex - 2].head, buf))
        return sfPrintError(rowIndex, 1);
    for (int i = 0; i < rows[rowIndex - 2].cnt; i++)
        if (!(fin >> NOR))
            return sfPrintError(rowIndex, i + 1);
    fin.ignore(1000000, '\n');
    rowIndex = 5;
    fin.getline(buf, 10, ',');
    if (strcmp(rows[rowIndex - 2].head, buf))
        return sfPrintError(rowIndex, 1);
    for (int i = 0; i < rows[rowIndex - 2].cnt; i++)
        if (!(fin >> NOL))
            return sfPrintError(rowIndex, i + 1);
    fin.ignore(1000000, '\n');
    rowIndex = 6;
    fin.getline(buf, 10, ',');
    if (strcmp(rows[rowIndex - 2].head, buf))
        return sfPrintError(rowIndex, 1);
    for (int i = 0; i < rows[rowIndex - 2].cnt; i++)
        if (!(fin >> NOS))
            return sfPrintError(rowIndex, i + 1);
    fin.ignore(1000000, '\n');
    // Counts are known now: derive the free-node count and size the arrays.
    NFRN = TNN - NFIN;
    nodes = new Node[TNN]();
    rods = new Rod[NOR]();
    loads = new Load[NOL]();
    sections = new Section[NOS]();
    // Rows 7..24: the bulk data. Each row is label + cnt values; the
    // switch routes each value into the matching node/rod/load/section field.
    for (rowIndex = 7; rowIndex <= 24; rowIndex++)
    {
        fin.getline(buf, 10, ',');
        if (strcmp(rows[rowIndex - 2].head, buf))
            return sfPrintError(rowIndex, 1);
        for (int i = 0; i < rows[rowIndex - 2].cnt; i++)
        {
            switch (rowIndex)
            {
            case 7: // XCN: node x coordinates
                if (!(fin >> nodes[i].XCN))
                    return sfPrintError(rowIndex, i + 1);
                break;
            case 8: // YCN: node y coordinates
                if (!(fin >> nodes[i].YCN))
                    return sfPrintError(rowIndex, i + 1);
                break;
            case 9: // ZCN: node z coordinates
                if (!(fin >> nodes[i].ZCN))
                    return sfPrintError(rowIndex, i + 1);
                break;
            case 10: // BNR: rod begin node
                if (!(fin >> rods[i].BNR))
                    return sfPrintError(rowIndex, i + 1);
                break;
            case 11: // ENR: rod end node
                if (!(fin >> rods[i].ENR))
                    return sfPrintError(rowIndex, i + 1);
                break;
            case 12: // ELASTIC: elastic modulus
                if (!(fin >> rods[i].ELASTIC))
                    return sfPrintError(rowIndex, i + 1);
                break;
            case 13: // SHEAR: shear modulus
                if (!(fin >> rods[i].SHEAR))
                    return sfPrintError(rowIndex, i + 1);
                break;
            case 14: // AREA: cross-section area
                if (!(fin >> rods[i].AREA))
                    return sfPrintError(rowIndex, i + 1);
                break;
            case 15: // IMY: moment of inertia about y
                if (!(fin >> rods[i].IMY))
                    return sfPrintError(rowIndex, i + 1);
                break;
            case 16: // IMZ: moment of inertia about z
                if (!(fin >> rods[i].IMZ))
                    return sfPrintError(rowIndex, i + 1);
                break;
            case 17: // THETA: rod rotation angle
                if (!(fin >> rods[i].THETA))
                    return sfPrintError(rowIndex, i + 1);
                break;
            case 18: // NRL: rod number the load acts on
                if (!(fin >> loads[i].NRL))
                    return sfPrintError(rowIndex, i + 1);
                break;
            case 19: // PLI: load placement indicator
                if (!(fin >> loads[i].PLI))
                    return sfPrintError(rowIndex, i + 1);
                break;
            case 20: // KOL: kind of load
                if (!(fin >> loads[i].KOL))
                    return sfPrintError(rowIndex, i + 1);
                break;
            case 21: // VOL: value of load
                if (!(fin >> loads[i].VOL))
                    return sfPrintError(rowIndex, i + 1);
                break;
            case 22: // DLB: load distance/position parameter
                if (!(fin >> loads[i].DLB))
                    return sfPrintError(rowIndex, i + 1);
                break;
            case 23: // NRS: rod number of the result section
                if (!(fin >> sections[i].NRS))
                    return sfPrintError(rowIndex, i + 1);
                break;
            case 24: // DSB: section distance along the rod
                if (!(fin >> sections[i].DSB))
                    return sfPrintError(rowIndex, i + 1);
                break;
            }
            fin.get(); // consume the separating comma
        }
        fin.ignore(1000000, '\n'); // skip the rest of the line
    }
    fin.close();
    status = 1; // input procedure is completed
    return 0;
}
// Emit the calculation results. Always writes the CSV report
// "source&result/sfResultClass.csv" when a calculation has completed
// (status == 2); additionally pretty-prints the same tables to the
// terminal when `terminal` is true. Returns 0 unconditionally.
// NOTE(review): the default argument is given on this out-of-line
// definition; that is only legal if the in-class declaration does not
// also specify one — TODO confirm against the header.
bool SpaceFrame::sfOutput(bool terminal = false) // terminal on/off
{
    if (status == 2 && terminal) // terminal
    {
        sfPrintLine();
        cout << setw(80) << "Calculation Of Space Rigid Frame\n";
        sfPrintLine();
        // model summary counters
        cout << "| TNN = " << setw(9) << TNN << " | NFIN = " << setw(8) << NFIN << " | NFRN = " << setw(8) << NFRN << " | NOR = " << setw(9) << NOR << " | NOL = " << setw(9) << NOL << " | NOS = " << setw(9) << NOS << " | |\n";
        sfPrintLine();
        // node coordinates
        cout << "| Nodes | Coordinate-X | Coordinate-Y | Coordinate-Z | | |\n";
        for (int i = 0; i < TNN; i++)
            cout << "| " << setw(15) << i + 1 << " | " << setw(15) << nodes[i].XCN << " | " << setw(15) << nodes[i].YCN << " | " << setw(15) << nodes[i].ZCN << " | | | |\n";
        sfPrintLine();
        // rod connectivity and material properties
        cout << "| Rods | Left - Right | Elastic Modulus | Shear modulus | Area | Inertia Y Axis | Inertia Z Axis |\n";
        for (int i = 0; i < NOR; i++)
            cout << "| " << setw(15) << i + 1 << " | " << setw(6) << rods[i].BNR << " - " << left << setw(6) << rods[i].ENR << " | " << right << setw(15) << rods[i].ELASTIC << " | " << setw(15) << rods[i].SHEAR << " | " << setw(15) << rods[i].AREA << " | " << setw(15) << rods[i].IMY << " | " << setw(15) << rods[i].IMZ << " |\n";
        sfPrintLine();
        // requested result sections
        cout << "| Sections | Rods | Distance | | | |\n";
        for (int i = 0; i < NOS; i++)
            cout << "| " << setw(15) << i + 1 << " | " << setw(15) << sections[i].NRS << " | " << setw(15) << sections[i].DSB << " | | | | |\n";
        sfPrintLine();
        // free-node displacements: 6 DOF per node, stored from node NFIN on
        cout << "| Nodes | Displacement-X | Displacement-Y | Displacement-Z | Diversion-X | Diversion-Y | Diversion-Z |\n";
        for (int i = NFIN; i < TNN; i++)
            cout << "| " << setw(15) << i + 1 << " | " << setw(15) << Displacement[6 * (i - NFIN)] << " | " << setw(15) << Displacement[6 * (i - NFIN) + 1] << " | " << setw(15) << Displacement[6 * (i - NFIN) + 2] << " | " << setw(15) << Displacement[6 * (i - NFIN) + 3] << " | " << setw(15) << Displacement[6 * (i - NFIN) + 4] << " | " << setw(15) << Displacement[6 * (i - NFIN) + 5] << " |\n";
        sfPrintLine();
        // internal forces at each result section
        cout << "| Sections | Axial force-X | Shear force-Y | Shear force-Z | Torque-X | Bending-Y | Bending-Z |\n";
        for (int i = 0; i < NOS; i++)
            cout << "| " << setw(15) << i + 1 << " | " << setw(15) << sections[i].IFS[0] << " | " << setw(15) << sections[i].IFS[1] << " | " << setw(15) << sections[i].IFS[2] << " | " << setw(15) << sections[i].IFS[3] << " | " << setw(15) << sections[i].IFS[4] << " | " << setw(15) << sections[i].IFS[5] << " |\n";
        sfPrintLine();
    }
    if (status == 2) // file
    {
        // Same tables as above, in CSV form.
        ofstream fout("source&result/sfResultClass.csv", ios::out);
        fout << setw(80) << "Calculation Of Space Rigid Frame,\n";
        fout << "TNN = " << setw(9) << TNN << " , NFIN = " << setw(8) << NFIN << " , NFRN = " << setw(8) << NFRN << " , NOR = " << setw(9) << NOR << " , NOL = " << setw(9) << NOL << " , NOS = " << setw(9) << NOS << " , ,\n";
        fout << "Nodes , Coordinate-X , Coordinate-Y , Coordinate-Z , , ,\n";
        for (int i = 0; i < TNN; i++)
            fout << setw(15) << i + 1 << " , " << setw(15) << nodes[i].XCN << " , " << setw(15) << nodes[i].YCN << " , " << setw(15) << nodes[i].ZCN << " , , , ,\n";
        fout << "Rods , Left - Right , Elastic Modulus , Shear modulus , Area , Inertia Y Axis , Inertia Z Axis ,\n";
        for (int i = 0; i < NOR; i++)
            fout << setw(15) << i + 1 << " , " << setw(6) << rods[i].BNR << " - " << left << setw(6) << rods[i].ENR << " , " << right << setw(15) << rods[i].ELASTIC << " , " << setw(15) << rods[i].SHEAR << " , " << setw(15) << rods[i].AREA << " , " << setw(15) << rods[i].IMY << " , " << setw(15) << rods[i].IMZ << " ,\n";
        fout << "Sections , Rods , Distance , , , ,\n";
        for (int i = 0; i < NOS; i++)
            fout << setw(15) << i + 1 << " , " << setw(15) << sections[i].NRS << " , " << setw(15) << sections[i].DSB << " , , , , ,\n";
        fout << "Nodes , Displacement-X , Displacement-Y , Displacement-Z , Diversion-X , Diversion-Y , Diversion-Z ,\n";
        for (int i = NFIN; i < TNN; i++)
            fout << setw(15) << i + 1 << " , " << setw(15) << Displacement[6 * (i - NFIN)] << " , " << setw(15) << Displacement[6 * (i - NFIN) + 1] << " , " << setw(15) << Displacement[6 * (i - NFIN) + 2] << " , " << setw(15) << Displacement[6 * (i - NFIN) + 3] << " , " << setw(15) << Displacement[6 * (i - NFIN) + 4] << " , " << setw(15) << Displacement[6 * (i - NFIN) + 5] << " ,\n";
        fout << "Sections , Axial force-X , Shear force-Y , Shear force-Z , Torque-X , Bending-Y , Bending-Z ,\n";
        for (int i = 0; i < NOS; i++)
            fout << setw(15) << i + 1 << " , " << setw(15) << sections[i].IFS[0] << " , " << setw(15) << sections[i].IFS[1] << " , " << setw(15) << sections[i].IFS[2] << " , " << setw(15) << sections[i].IFS[3] << " , " << setw(15) << sections[i].IFS[4] << " , " << setw(15) << sections[i].IFS[5] << " ,\n";
        fout.close();
    }
    else
        cout << "Calculation is not completed!\n";
    return 0;
}
/**
 * Drive the full solution pipeline: rod geometry -> matrix allocation ->
 * stiffness/load assembly -> conjugate-gradient solve -> section forces.
 *
 * @param parallel      use the OpenMP conjugate-gradient solver when true
 * @param progress_bar  forwarded to the solver for progress display
 * @param eps           solver tolerance; applied only when 0 <= eps <= 1,
 *                      otherwise the current EPS is kept
 * @return 0 on success, non-zero on failure (following the convention of
 *         the other sf* methods, which return sfPrintError(...) on error)
 *
 * NOTE(review): the default arguments on this out-of-line definition are
 * only legal if the in-class declaration does not repeat them — confirm.
 */
bool SpaceFrame::sfCalculate(bool parallel = true, bool progress_bar = true, double eps = -1)
{
    if (status == 0 || status == 4)
    {
        cout << "There is something wrong in Data input!\n";
        // BUG FIX: this branch previously returned 0, which is the success
        // code everywhere else in this class, so callers would treat a
        // failed/missing input as a successful calculation.
        return 1;
    }
    ProgressBar = progress_bar;
    Parallel = parallel;
    if (eps >= 0 && eps <= 1)
        EPS = eps;
    if (sfLCosSin()) // calculate the length, cosine and sine of all rods
        return sfPrintError(6);
    else
        cout << "Calculating length, cosine and sine succeed!\n";
    if (sfAllocate()) // set up variable-bandwidth storage for the stiffness matrix
        return sfPrintError(15);
    else
        cout << "Allocating Variable Bandwith Matrix succeed!\n";
    if (sfBuildTotalStiff()) // build total stiffness matrix
        return sfPrintError(2);
    else
        cout << "Building total stiffness matrix succeeded!\n";
    if (sfBuildLoadVector()) // build load stiffness vector
        return sfPrintError(3);
    else
        cout << "Building load vector succeeded!\n";
    // Solve K * d = F with the chosen conjugate-gradient variant;
    // the system has 6 degrees of freedom per free node.
    if (Parallel)
    {
        if (sfConjugateGradientPar(TotalStiffness, LoadVector, Displacement, 6 * NFRN)) // solve matrix equation
            return sfPrintError(4);
        else
            cout << "Solving equation succeeded!\n";
    }
    else
    {
        if (sfConjugateGradient(TotalStiffness, LoadVector, Displacement, 6 * NFRN)) // solve matrix equation
            return sfPrintError(4);
        else
            cout << "Solving equation succeeded!\n";
    }
    // Recover internal forces at every requested result section.
    for (int i = 0; i < NOS; i++)
        if (sfInternalForce(i, sections[i].NRS, sections[i].DSB))
            return sfPrintError(5);
    cout << "Outputing data succeed!\n";
    status = 2; // calculation is completed
    return 0;
}
// Generate a synthetic (m+1) x (n+1) x (l+1) cuboid lattice frame and
// write it to "source&result/sf_test.csv" in the format sfInput expects,
// for stress testing. Nodes are numbered layer by layer; the bottom
// layer (l = 0) holds the fixed nodes. Loads and result sections are
// placed on the top layer; material properties are lightly randomized.
bool SpaceFrame::sfCircularStructure(int m, int n, int l)
{
    ofstream fout("source&result/sf_test.csv", ios::out);
    // header: free nodes * 6 DOF
    fout << "Stress Test, degree of freedom is " << ((m + 1) * (n + 1) * (l + 1) - (m + 1) * (n + 1)) * 6 << ",\n";
    fout << "TNN," << (m + 1) * (n + 1) * (l + 1) << ",\n";
    fout << "NFIN," << (m + 1) * (n + 1) << ",\n";
    // rods per layer step: (2m+1)(2n+1) - mn == verticals (m+1)(n+1)
    // + y-direction rods (m+1)n + x-direction rods (n+1)m
    int nor = ((2 * m + 1) * (2 * n + 1) - m * n) * l;
    fout << "NOR," << nor << ",\n";
    fout << "NOL," << (m + 1) * (n + 1) << ",\n";
    fout << "NOS," << (m + 1) * (n + 1) << ",\n";
    // node coordinates: x varies fastest, then y, then z (layer)
    fout << "XCN,";
    for (int i = 0; i < l + 1; i++)
        for (int j = 0; j < n + 1; j++)
            for (int k = 0; k < m + 1; k++)
                fout << k << ",";
    fout << "\n";
    fout << "YCN,";
    for (int i = 0; i < l + 1; i++)
        for (int j = 0; j < n + 1; j++)
            for (int k = 0; k < m + 1; k++)
                fout << j << ",";
    fout << "\n";
    fout << "ZCN,";
    for (int i = 0; i < l + 1; i++)
        for (int j = 0; j < n + 1; j++)
            for (int k = 0; k < m + 1; k++)
                fout << i << ",";
    fout << "\n";
    // rod begin nodes: per layer step, first the verticals (start in
    // layer i), then the y-direction rods and the x-direction rods of
    // layer i+1 (1-based node numbering)
    fout << "BNR,";
    for (int i = 0; i < l; i++)
    {
        for (int j = 0; j < (m + 1) * (n + 1); j++)
            fout << j + 1 + i * (m + 1) * (n + 1) << ",";
        for (int j = 0; j < (m + 1) * n; j++)
            fout << j + 1 + (i + 1) * (m + 1) * (n + 1) << ",";
        for (int j = 0; j < n + 1; j++)
            for (int k = 0; k < m; k++)
                fout << k + 1 + j * (m + 1) + (i + 1) * (m + 1) * (n + 1) << ",";
    }
    fout << "\n";
    // rod end nodes, in the same order as BNR
    fout << "ENR,";
    for (int i = 0; i < l; i++)
    {
        for (int j = 0; j < (m + 1) * (n + 1); j++)
            fout << j + 1 + (i + 1) * (m + 1) * (n + 1) << ",";
        for (int j = 0; j < (m + 1) * n; j++)
            fout << j + 1 + m + 1 + (i + 1) * (m + 1) * (n + 1) << ",";
        for (int j = 0; j < n + 1; j++)
            for (int k = 0; k < m; k++)
                fout << k + 2 + j * (m + 1) + (i + 1) * (m + 1) * (n + 1) << ",";
    }
    fout << "\n";
    // material properties: randomized elastic modulus and inertias,
    // constant shear modulus, area and angle
    fout << "ELASTIC,";
    for (int i = 0; i < nor; i++)
        fout << 210000000 + 100000 * (rand() % 1000) << ",";
    fout << "\n";
    fout << "SHEAR,";
    for (int i = 0; i < nor; i++)
        fout << 80769000 << ",";
    fout << "\n";
    fout << "AREA,";
    for (int i = 0; i < nor; i++)
        fout << 0.007854 << ",";
    fout << "\n";
    fout << "IMY,";
    for (int i = 0; i < nor; i++)
        fout << 0.0000040001 + 0.0000000001 * (rand() % 10000) << ",";
    fout << "\n";
    fout << "IMZ,";
    for (int i = 0; i < nor; i++)
        fout << 0.0000040001 + 0.0000000001 * (rand() % 10000) << ",";
    fout << "\n";
    fout << "THETA,";
    for (int i = 0; i < nor; i++)
        fout << 0 << ",";
    fout << "\n";
    // loads: one per top-layer node, applied to the last layer's vertical rods
    fout << "NRL,";
    for (int i = 0; i < (m + 1) * (n + 1); i++)
        fout << i + 1 + ((2 * m + 1) * (2 * n + 1) - m * n) * (l - 1) << ",";
    fout << "\n";
    fout << "PLI,";
    for (int i = 0; i < (m + 1) * (n + 1); i++)
        fout << 0 << ",";
    fout << "\n";
    fout << "KOL,";
    for (int i = 0; i < (m + 1) * (n + 1); i++)
        fout << 3 << ",";
    fout << "\n";
    fout << "VOL,";
    for (int i = 0; i < (m + 1) * (n + 1); i++)
        fout << 1000 + rand() % 1000 << ",";
    fout << "\n";
    fout << "DLB,";
    for (int i = 0; i < (m + 1) * (n + 1); i++)
        fout << 1 << ",";
    fout << "\n";
    // result sections: midpoints (0.5) of the first (m+1)(n+1) rods
    fout << "NRS,";
    for (int i = 0; i < (m + 1) * (n + 1); i++)
        fout << i + 1 << ",";
    fout << "\n";
    fout << "DSB,";
    for (int i = 0; i < (m + 1) * (n + 1); i++)
        fout << 0.5 << ",";
    fout << "\nEND,";
    fout.close();
    return 0;
}
|
04_touch_by_all.c |
/* ────────────────────────────────────────────────────────────────────────── *
│ │
│ This file is part of the exercises for the Lectures on │
│ "Foundations of High Performance Computing" │
│ given at │
│ Master in HPC and │
│ Master in Data Science and Scientific Computing │
│ @ SISSA, ICTP and University of Trieste │
│ │
│ contact: luca.tornatore@inaf.it │
│ │
│ This is free software; you can redistribute it and/or modify │
│ it under the terms of the GNU General Public License as published by │
│ the Free Software Foundation; either version 3 of the License, or │
│ (at your option) any later version. │
│ This code is distributed in the hope that it will be useful, │
│ but WITHOUT ANY WARRANTY; without even the implied warranty of │
│ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the │
│ GNU General Public License for more details. │
│ │
│ You should have received a copy of the GNU General Public License │
│ along with this program. If not, see <http://www.gnu.org/licenses/> │
│ │
* ────────────────────────────────────────────────────────────────────────── */
/*
* COMPILE LINE (icc): -Ofast -fno-alias -xCORE-AVX2 -xHost -fma -use-intel-optimized-headers -falign-loops -qopenmp -parallel -pthread -ipo -vec
*/
#if defined(__STDC__)
#if (__STDC_VERSION__ >= 199901L)
#define _XOPEN_SOURCE 700
#endif
#endif
#define _GNU_SOURCE
#include <omp.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>
#define N_default 1000
#define CPU_TIME (clock_gettime(CLOCK_REALTIME, &ts), (double)ts.tv_sec + (double)ts.tv_nsec * 1e-9)
#define CPU_TIME_T (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &myts), (double)myts.tv_sec + (double)myts.tv_nsec * 1e-9)
#define CPU_TIME_P (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts), (double)ts.tv_sec + (double)ts.tv_nsec * 1e-9)
#define CPU_ID_ENTRY_IN_PROCSTAT 39
#define HOSTNAME_MAX_LENGTH 200
int read_proc__self_stat(int, int*);
int get_cpu_id(void);
/*
 * Allocate an N-element array (N from argv[1], default N_default), touch
 * it in a parallel for so pages are first-touched by the threads that
 * will later read them, then sum it with an OpenMP reduction and print
 * the sum and the elapsed wall-clock time.
 */
int main(int argc, char** argv)
{
    int N = N_default;
    int nthreads = 1;
    struct timespec ts;
    double* array;

    /* -----------------------------------------------------------------------------
     * initialize
     * -----------------------------------------------------------------------------
     */

    /* check whether some arg has been passed on */
    if (argc > 1)
        N = atoi(*(argv + 1));
    if (N <= 0) /* non-numeric or non-positive input: fall back to default */
        N = N_default;

    /* no cast on malloc (C idiom); sizeof through the element type */
    if ((array = malloc(N * sizeof(double))) == NULL)
    {
        /* BUG FIX: the two adjacent string literals were missing a
         * separating space, and the size was printed with %lu after an
         * (unsigned int) cast while the expression has type size_t —
         * a printf format mismatch (undefined behavior). %zu matches. */
        printf("I'm sorry, on some thread there is not "
               "enough memory to host %zu bytes\n",
               N * sizeof(double));
        return 1;
    }

    /* just give notice of what will happen and get the number of threads used */
#pragma omp parallel
    {
#pragma omp master
        {
#ifdef _OPENMP
            nthreads = omp_get_num_threads();
            printf("omp ");
#else
            nthreads = 1;
            printf("Running not linked to OpenMP: not getting thread info.\nThread nr. set to 1\n");
#endif
            printf("summation with %d threads\n", nthreads);
        }
    }

    /* initialize the array: with the default static schedule each thread
     * first touches the chunk it will later reduce over */
#pragma omp parallel for
    for (int ii = 0; ii < N; ii++)
        array[ii] = (double)ii;

    /* -----------------------------------------------------------------------------
     * calculate
     * -----------------------------------------------------------------------------
     */

    double S = 0; /* this will store the summation */
    double tstart = CPU_TIME;

#pragma omp parallel for reduction(+ : S)
    for (int ii = 0; ii < N; ii++)
        S += array[ii];

    double tend = CPU_TIME;

    /* -----------------------------------------------------------------------------
     * finalize
     * -----------------------------------------------------------------------------
     */

    printf("%g SUM\n\n\n"
           "%g\n",
           S, tend - tstart);

    free(array);
    return 0;
}
/* Return the id of the core the calling thread is currently running on,
 * or -1 on failure. Three fallbacks, selected at compile time:
 * sched_getcpu() under _GNU_SOURCE, a raw getcpu syscall when
 * SYS_getcpu is defined, and finally parsing /proc/self/stat. */
int get_cpu_id(void)
{
#if defined(_GNU_SOURCE) // GNU SOURCE ------------
return sched_getcpu();
#else
#ifdef SYS_getcpu // direct sys call ---
int cpuid;
if (syscall(SYS_getcpu, &cpuid, NULL, NULL) == -1)
return -1;
else
return cpuid;
#else
/* NOTE(review): read_proc__self_stat takes int*, but val is unsigned —
 * pointer-type mismatch in this (normally dead) branch; fix before
 * enabling this fallback. */
unsigned val;
if (read_proc__self_stat(CPU_ID_ENTRY_IN_PROCSTAT, &val) == -1)
return -1;
return (int)val;
#endif // -----------------------
#endif
}
int read_proc__self_stat(int field, int* ret_val)
/*
   Fetch one space-separated field from /proc/self/stat.

   `field` is 1-based: 1 = pid, 2 = comm, 14 = utime, 15 = cutime,
   20 = nthreads, 24 = rss, 39 = cpu id -- see `man 5 proc`.

   On success returns 0 and stores the field (parsed with atoi) in
   *ret_val; returns -1 on any failure, leaving *ret_val == 0.

   BUG FIXES vs the previous version:
   - `len` was passed to getline() uninitialized (POSIX requires *n to
     be set when *lineptr is NULL);
   - the do/while token loop always ran at least once and never
     terminated for field <= 1;
   - a NULL token (field beyond the end of the line) was handed to
     atoi, and `line` leaked on the getline error path.
*/
{
    *ret_val = 0;

    if (field < 1)
        return -1;

    FILE* file = fopen("/proc/self/stat", "r");
    if (file == NULL)
        return -1;

    char* line = NULL;
    size_t len = 0; /* must be 0 while line == NULL */
    ssize_t nread = getline(&line, &len, file);
    fclose(file);
    if (nread == -1)
    {
        free(line); /* getline may allocate even when it fails */
        return -1;
    }

    char* savetoken = line;
    char* token = strtok_r(line, " ", &savetoken);

    /* advance to the requested (1-based) field */
    while (--field > 0 && token != NULL)
        token = strtok_r(NULL, " ", &savetoken);

    if (token == NULL)
    {
        free(line);
        return -1;
    }

    *ret_val = atoi(token);
    free(line);
    return 0;
}
|
vector.h | #ifndef VECTOR_H
#define VECTOR_H
#include <omp.h>
#include <sys/time.h>
#include "matrix.h"
// Benchmark variant: vector-matrix product with the ROW loop shared
// among threads (#pragma omp for on the outer loop); each thread owns a
// disjoint set of rows, so vB(i) needs no synchronization.
// Allocates vA (1 x r), A (r x c) and the result vB (1 x c), fills the
// inputs, times the product and returns the elapsed time via ELAPSED.
// NOTE(review): assumes iterate(, i, n) expands to a plain for loop over
// [0, n) reusing the pre-declared index — confirm against matrix.h.
ull vrows(ull r, // Matrix rows
          ull c, // Matrix cols
          ul threads)
{
    timeval start, end;
    matrix* vA = alloc(1, r),
          * A = alloc(r, c),
          * vB = alloc(1, c); // Result
    fill(vA);
    fill(A);
    /**
     * Data split by rows
     */
    gettimeofday(&start, NULL);
    {
        ull i = 0, j = 0;
#pragma omp parallel private(i, j) shared(vB) num_threads(threads)
        {
#pragma omp for
            iterate(, i, A->rows) {
                iterate(, j, A->cols) { vB(i) += vA(j) * A(i, j); }
            }
        }
    }
    gettimeofday(&end, NULL);
#ifdef WRITE
    printf("\tResult matrix is written to `vector.txt`\n");
    write(vB, "vector.txt");
#endif
    dealloc(vA);
    dealloc(A);
    dealloc(vB);
    return ELAPSED;
}
// Benchmark variant: vector-matrix product with the COLUMN loop shared
// among threads. For every row each thread accumulates a private partial
// dot product over its column chunk (#pragma omp for), then merges it
// into vB(i) inside a critical section and resets its accumulator.
// Returns the elapsed wall time via ELAPSED.
// NOTE(review): `i` is private, so each thread walks all rows itself;
// assumes iterate(, i, n) is a plain for loop — confirm in matrix.h.
ull vcols(ull r, ull c, ul threads)
{
    timeval start, end;
    matrix* vA = alloc(1, r),
          * A = alloc(r, c),
          * vB = alloc(1, c); // Result
    fill(vA);
    fill(A);
    /**
     * Data split by columns
     */
    gettimeofday(&start, NULL);
    {
        ull i = 0, j = 0;
#pragma omp parallel private(i, j) shared(vB) num_threads(threads)
        {
            T dot = 0; // per-thread partial sum for the current row
            iterate(, i, A->rows) {
#pragma omp for
                iterate(, j, A->cols) { dot += vA(j) * A(i, j); }
                // merge this thread's partial sum, reset it for the next row
#pragma omp critical
                {
                    vB(i) += dot;
                    dot = 0;
                }
            }
        }
    }
    gettimeofday(&end, NULL);
#ifdef WRITE
    write(vB, "vector.txt");
#endif
    dealloc(vA);
    dealloc(A);
    dealloc(vB);
    return ELAPSED;
}
// Benchmark variant: vector-matrix product with the work split into a
// (threads x threads) grid of 2D blocks, distributed by a collapsed
// omp for; writes to vB(i) are protected by #pragma omp atomic because
// blocks in the same block-row share result entries.
// Returns the elapsed wall time via ELAPSED.
ull vblocks(ull r, ull c, ul threads)
{
    timeval start, end;
    matrix* vA = alloc(1, r),
          * A = alloc(r, c),
          * vB = alloc(1, c); // Result
    fill(vA);
    fill(A);
    /**
     * Data split by blocks
     */
    gettimeofday(&start, NULL);
#pragma omp parallel shared(vB) num_threads(threads)
    {
        ull lt = omp_get_num_threads(),
            bv = lt, // Vertical blocks
            bh = lt; // Horizontal blocks
#pragma omp for collapse(2)
        iterate(ull, iv, lt) {
            iterate(ull, ih, lt) {
                for (ull i = iv * A->rows / bv; i < (iv + 1) * A->rows / bv; ++i) {
                    for (ull j = ih * A->cols / bh; j < (ih + 1) * A->cols / bh; ++j) {
#pragma omp atomic
                        // BUG FIX: was vA(i); the sibling variants vrows and
                        // vcols both accumulate vA(j) * A(i, j), so vblocks
                        // computed a different product.
                        vB(i) += A(i, j) * vA(j);
                    }
                }
            }
        }
    }
    gettimeofday(&end, NULL);
#ifdef WRITE
    write(vB, "vector.txt");
#endif
    dealloc(vA);
    dealloc(A);
    dealloc(vB);
    return ELAPSED;
}
#endif // VECTOR_H
|
aggregate_ops.h | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#ifndef LIBND4J_AGGREGATE_OPS_H
#define LIBND4J_AGGREGATE_OPS_H
#include <ops/ops.h>
#include <templatemath.h>
#define HS_MAX_EXP 6.0f
#ifdef __CUDACC__
#define aggregate_def __device__ inline static
#else
#include <ops/gemm.h>
#define aggregate_def inline static
#endif
/*
*
*
* Aggregate Ops are special things suited for CUDA mostly. They are meant to be executed within single block ONLY.
* So, when batched, they should provide proper parallelism levels on poorly parallel tasks otherwise.
*
* On CPU aggregate ops are trying to minimize OpenMP multi-threading use, only SIMD is enforced
*
*
*/
namespace aggregateOps {
// Aggregate wrapper around the project's CPU GEMM routine. On CUDA
// builds both entry points are stubs (GEMM is not executed per-block on
// device); on CPU the op unpacks BLAS-style argument lists and calls
// nd4j::blas::GEMM.
template<typename T>
class GEMM {
    public:
#ifdef __CUDACC__
        // Device stub: kept only so the aggregate dispatch stays uniform.
        aggregate_def void executeAggregateCuda(T **arguments, int numArguments, Nd4jLong **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
            // no-op
        }
#endif
#ifndef __CUDACC__
        // Map an ASCII order code ('c'/'C' row-major, 'f'/'F' column-major)
        // to the CBLAS constant; unknown codes default to column-major.
        static CBLAS_ORDER convertOrder(int from) {
            switch(from) {
                //'c'
                case 99:
                    return CblasRowMajor;
                //'C'
                case 67: return CblasRowMajor;
                //'f'
                case 102: return CblasColMajor;
                //'F'
                case 70: return CblasColMajor;
                default: return CblasColMajor;
            }
        }
        // Map an ASCII transpose code ('t'/'T', 'n'/'N', 'c'/'C') to the
        // CBLAS constant; unknown codes default to no-transpose.
        static CBLAS_TRANSPOSE convertTranspose(int from) {
            switch(from) {
                //'t'
                case 116: return CblasTrans;
                //'T'
                case 84: return CblasTrans;
                //'n'
                case 110: return CblasNoTrans;
                //'N'
                case 78: return CblasNoTrans;
                //'c'
                case 99: return CblasConjTrans;
                //'C'
                case 67: return CblasConjTrans;
                default: return CblasNoTrans;
            }
        }
#endif
#ifndef __CUDACC__
        // CPU path: C = alpha * op(A) * op(B) + beta * C.
        // indexArguments: [M, N, K, lda, ldb, ldc, transA, transB, order];
        // realArguments: [alpha, beta]; arguments: [A, B, C].
        aggregate_def void executeAggregate(T **arguments, int numArguments, Nd4jLong **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
            int M = indexArguments[0];
            int N = indexArguments[1];
            int K = indexArguments[2];
            int lda = indexArguments[3];
            int ldb = indexArguments[4];
            int ldc = indexArguments[5];
            int TransA = indexArguments[6];
            int TransB = indexArguments[7];
            int Order = indexArguments[8];

            T alpha = realArguments[0];
            T beta = realArguments[1];

            T *A = arguments[0];
            T *B = arguments[1];
            T *C = arguments[2];

            nd4j::blas::GEMM<T, T, T>::op(convertOrder(Order), convertTranspose(TransA), convertTranspose(TransB),M,N,K,(T) alpha,A,lda,B,ldb,(T) beta,C,ldc);
        }
#else
        aggregate_def void executeAggregate(T **arguments, int numArguments, Nd4jLong **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
            // stub for nvcc
        }
#endif
};
/**
* We don't include this class into ops directly, since it won't be ever used directly,
* Only as part of SkipGram or CBOW
*/
template<typename T>
class HierarchicSoftmax {
    private:

    public:
        // CPU kernel: one hierarchic-softmax training step.
        // arguments: [syn0 row, syn1 row, expTable, neu1e accumulator];
        // indexArguments: [vectorLength, expLength, code, isInference];
        // realArguments: [alpha (learning rate)].
        // Computes dot(syn0, syn1), looks the sigmoid up in expTable,
        // accumulates the error into neu1e and (unless inference) updates
        // syn1 in place. Returns early (no update) when dot saturates.
        aggregate_def void executeAggregate(T **arguments, int numArguments, Nd4jLong **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
            int vectorLength = indexArguments[0];
            int expLength = indexArguments[1];
            int code = indexArguments[2];
            int isInference = indexArguments[3];

            T *syn0 = arguments[0]; // we pass row pointer here
            T *syn1 = arguments[1]; // we pass row pointer here
            T *expTable = arguments[2];
            T *neu1e = arguments[3];

            T dot(0.0f);
            T g(0.0f);
            T f(0.0f);
            T alpha = realArguments[0];

            // dot product of the two rows
            //#pragma omp simd reduction(sumT:dot)
            for (int x = 0; x < vectorLength; x++) {
                dot += syn0[x] * syn1[x];
            }

            // gradient: skip the update entirely when dot saturates the table
            if (dot < (T) - HS_MAX_EXP || dot >= (T) HS_MAX_EXP) {
                return;
            }

            // map dot in (-HS_MAX_EXP, HS_MAX_EXP) onto a table index
            int idx = static_cast<int>((dot + HS_MAX_EXP) * ((T) expLength / HS_MAX_EXP / 2.0f));

            if (idx >= expLength || idx < 0) {
                return;
            }

            f = expTable[idx];
            g = (static_cast<T>(1.0f) - static_cast<T>(code) - f) * alpha;

            // axpy1: accumulate the error signal for the caller
#pragma omp simd
            for (int x = 0; x < vectorLength; x++) {
                neu1e[x] = g * syn1[x] + neu1e[x];
            }

            // axpy2: update syn1 only when actually training
            if (!isInference) {
#pragma omp simd
                for (int x = 0; x < vectorLength; x++) {
                    syn1[x] = g * syn0[x] + syn1[x];
                }
            }
        }

#ifdef __CUDACC__
        // CUDA kernel: same step, executed by a single block. The scalars
        // live in shared memory; the dot product is formed with atomicAdd.
        // The early returns are taken uniformly by the whole block because
        // dot (and hence idx) is identical for every thread after the
        // __syncthreads() barriers.
        // NOTE(review): unlike the CPU path there is no idx < 0 guard;
        // dot >= -HS_MAX_EXP passed above implies idx >= 0, so it appears
        // unreachable — confirm.
        aggregate_def void executeAggregateCuda(T **arguments, int numArguments, Nd4jLong **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
            /*
                We know that syn0 & syn1 are 2D matrices, so we can just use offsets here
            */
            __shared__ int vectorLength;
            __shared__ int expLength;
            __shared__ int code;
            __shared__ int isInference;

            T *syn0 = arguments[0];
            T *syn1 = arguments[1];
            T *expTable = arguments[2];
            T *neu1e = arguments[3];

            __shared__ T dot;
            __shared__ T g;
            __shared__ T f;
            __shared__ T alpha;

            if (threadIdx.x == 0) {
                vectorLength = indexArguments[0];
                expLength = indexArguments[1];
                code = indexArguments[2];
                isInference = indexArguments[3];

                dot = (T) 0.0f;

                alpha = realArguments[0];
            }
            __syncthreads();

            // TODO: it would be great to implement dot without atomicAdd call. like aggregateParticles, or something like that
            // dot
            for (int x = threadIdx.x; x < vectorLength; x+=blockDim.x) {
                T prod = syn0[x] * syn1[x];
                nd4j::math::atomics::nd4j_atomicAdd<T>(&dot, prod);
            }

            // gradient
            __syncthreads();

            if (dot < - (T) HS_MAX_EXP || dot >= (T) HS_MAX_EXP)
                return;

            int idx = (int) ((dot + HS_MAX_EXP) * ((T) expLength / (T) HS_MAX_EXP / 2.0));

            if (idx >= expLength)
                return;

            if (threadIdx.x == 0) {
                // gradient calculation
                f = expTable[idx];
                g = ((T) 1.0f - (T) code - f) * alpha;
            }
            __syncthreads();

            // axpy1
            for (int x = threadIdx.x; x < vectorLength; x+=blockDim.x) {
                neu1e[x] = g * syn1[x] + neu1e[x];
            }

            // axpy2
            if (!isInference)
                for (int x = threadIdx.x; x < vectorLength; x+=blockDim.x) {
                    syn1[x] = g * syn0[x] + syn1[x];
                }
        }
#endif
};
/**
* We don't include this class into ops directly, since it won't be ever used directly,
* Only as part of SkipGram or CBOW
*/
template<typename T>
class NegativeSampling {
    public:
        // CPU kernel: one negative-sampling training step.
        // arguments: [syn0 row, syn1Neg row, expTable, neu1e accumulator];
        // indexArguments: [vectorLength, expLength, code (label), isInference];
        // realArguments: [alpha]. When dot saturates the exp table the
        // gradient clamps to (code - 1) * alpha or (code - 0) * alpha;
        // otherwise it is read from expTable. Accumulates the error into
        // neu1e and (unless inference) updates syn1Neg in place.
        aggregate_def void executeAggregate(T **arguments, int numArguments, Nd4jLong **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
            int vectorLength = indexArguments[0];
            int expLength = indexArguments[1];
            int code = indexArguments[2];
            int isInference = indexArguments[3];

            T *syn0 = arguments[0]; // we pass row pointer here
            T *syn1Neg = arguments[1]; // we pass row pointer here
            T *expTable = arguments[2];
            T *neu1e = arguments[3];

            T dot = (T) 0.0f;
            T g = (T) 0.0f;
            T alpha = realArguments[0];

            // dot product of the two rows
            //#pragma omp simd reduction(sumT:dot)
            for (int x = 0; x < vectorLength; x++) {
                dot += syn0[x] * syn1Neg[x];
            }

            // gradient: saturate outside [-HS_MAX_EXP, HS_MAX_EXP],
            // otherwise look the sigmoid up in expTable
            if (dot > HS_MAX_EXP)
                g = (code - 1) * alpha;
            else if (dot < (T) - HS_MAX_EXP)
                g = (code - 0) * alpha;
            else {
                int idx = (int) ((dot + (T) HS_MAX_EXP) * ((T) expLength / HS_MAX_EXP / 2.0));
                if (idx >= expLength)
                    return;

                if (idx < 0)
                    return;

                g = ((T) code - expTable[idx]) * alpha;
            }

            // axpy1: accumulate the error signal for the caller
#pragma omp simd
            for (int x = 0; x < vectorLength; x++) {
                neu1e[x] = g * syn1Neg[x] + neu1e[x];
            }

            // axpy2: update syn1Neg only when actually training
            if (!isInference) {
#pragma omp simd
                for (int x = 0; x < vectorLength; x++) {
                    syn1Neg[x] = g * syn0[x] + syn1Neg[x];
                }
            }
        }

#ifdef __CUDACC__
        // CUDA kernel: same step, single block; shared scalars, atomicAdd
        // for the dot product. The early return fires only when idx is out
        // of range while dot is inside the table's domain (outside it, g
        // comes from the saturation branches and expTable is never read).
        aggregate_def void executeAggregateCuda(T **arguments, int numArguments, Nd4jLong **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
            /*
                We know that syn0 & syn1 are 2D matrices, so we can just use offsets here
            */
            __shared__ int vectorLength;
            __shared__ int expLength;
            __shared__ int code;
            __shared__ int isInference;

            T *syn0 = arguments[0];
            T *syn1Neg = arguments[1];
            T *expTable = arguments[2];
            T *neu1e = arguments[3];

            __shared__ T dot;
            __shared__ T g;
            __shared__ T alpha;

            if (threadIdx.x == 0) {
                vectorLength = indexArguments[0];
                expLength = indexArguments[1];
                code = indexArguments[2];
                isInference = indexArguments[3];

                dot = (T) 0.0f;

                alpha = realArguments[0];
            }
            __syncthreads();

            // TODO: it would be great to implement dot without atomicAdd call. like aggregateParticles, or something like that
            // dot
            for (int x = threadIdx.x; x < vectorLength; x+=blockDim.x) {
                T prod = syn0[x] * syn1Neg[x];
                nd4j::math::atomics::nd4j_atomicAdd<T>(&dot, prod);
            }

            // gradient
            __syncthreads();

            int idx = (int) ((dot + (T) HS_MAX_EXP) * ((T) expLength / (T) HS_MAX_EXP / 2.0));

            if (idx >= expLength && dot <= (T) HS_MAX_EXP && dot >= (T) -HS_MAX_EXP)
                return;

            if (threadIdx.x == 0) {
                // gradient calculation
                if (dot > (T) HS_MAX_EXP)
                    g = (code - 1) * alpha;
                else if (dot < (T) - HS_MAX_EXP)
                    g = (code - 0) * alpha;
                else {
                    g = ((T) code - expTable[idx]) * alpha;
                }

                // printf("dot: [%f]; g: [%f]\n", dot, g);
            }
            __syncthreads();

            // printf("before syn1Neg[%i]: [%f], dot: [%f]; g: [%f]; vectorLength: [%i]\n", threadIdx.x, syn1Neg[threadIdx.x], dot, g, vectorLength);

            // axpy1
            for (int x = threadIdx.x; x < vectorLength; x+=blockDim.x) {
                neu1e[x] = g * syn1Neg[x] + neu1e[x];
            }

            // axpy2
            if (!isInference)
                for (int x = threadIdx.x; x < vectorLength; x+=blockDim.x) {
                    syn1Neg[x] = g * syn0[x] + syn1Neg[x];
                }

            // printf("after syn1Neg[%i]: [%f]\n", threadIdx.x, syn1Neg[threadIdx.x]);
        }
#endif
};
template<typename T>
class Dot {
    public:
        // CPU kernel: vecZ[0] = dot(vecX, vecY) over vectorLength elements.
        // arguments: [vecX, vecY, vecZ]; indexArguments: [vectorLength].
        // NOTE(review): `sumT` must be an OpenMP user-declared reduction
        // (#pragma omp declare reduction) defined elsewhere in the project —
        // confirm it is visible at this point.
        aggregate_def void executeAggregate(T **arguments, int numArguments, Nd4jLong **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
            T *vecX = arguments[0];
            T *vecY = arguments[1];
            T *vecZ = arguments[2];

            T dot = (T) 0.0f;

            int vectorLength = indexArguments[0];

#pragma omp simd reduction(sumT:dot)
            for (int x = 0; x < vectorLength; x++) {
                dot += vecX[x] * vecY[x];
            }

            vecZ[0] = dot;
        };

#ifdef __CUDACC__
        // CUDA kernel: single block, threads stride by blockDim.x and
        // combine partial products into shared `dot` via atomicAdd;
        // thread 0 writes the result.
        aggregate_def void executeAggregateCuda(T **arguments, int numArguments, Nd4jLong **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
            T *vecX = arguments[0];
            T *vecY = arguments[1];
            T *vecZ = arguments[2];

            int vectorLength = indexArguments[0];

            __shared__ T dot;
            if (threadIdx.x == 0)
                dot = (T) 0.0f;
            __syncthreads();

            for (int x = threadIdx.x; x < vectorLength; x+=blockDim.x) {
                T prod = vecX[x] * vecY[x];
                nd4j::math::atomics::nd4j_atomicAdd<T>(&dot, prod);
            }
            __syncthreads();

            if (threadIdx.x == 0)
                vecZ[0] = dot;
        }
#endif
};
template<typename T>
class Axpy {
    public:
        /**
         * CPU kernel: vecY := alpha * vecX + vecY (standard AXPY update).
         * arguments[0] = x vector, arguments[1] = y vector (updated in
         * place); realArguments[0] = alpha; indexArguments[0] = length.
         */
        aggregate_def void executeAggregate(T **arguments, int numArguments, Nd4jLong **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
            T *xs = arguments[0];
            T *ys = arguments[1];

            T scale = realArguments[0];
            int count = indexArguments[0];

#pragma omp simd
            for (int e = 0; e < count; e++) {
                ys[e] = scale * xs[e] + ys[e];
            }
        }

#ifdef __CUDACC__
        /**
         * CUDA kernel: same AXPY, executed by a single block whose threads
         * stride over the vector by blockDim.x.
         */
        aggregate_def void executeAggregateCuda(T **arguments, int numArguments, Nd4jLong **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
            T *xs = arguments[0];
            T *ys = arguments[1];

            T scale = realArguments[0];
            int count = indexArguments[0];

            for (int e = threadIdx.x; e < count; e += blockDim.x) {
                ys[e] = scale * xs[e] + ys[e];
            }
            __syncthreads();
        }
#endif
};
template<typename T>
class SkipGram {
public:
    /**
     * Word2Vec skip-gram training/inference step for a single center word.
     *
     * arguments: [0] syn0 table, [1] syn1 (HS weights), [2] expTable,
     *            [3] syn1Neg (NS weights), [4] negTable, [5] inferenceVector
     * indexArguments: [0] syn0Row, [1] vectorLength, [2] hsRounds, [3] ngRounds,
     *            [4] expLength, [5] vocabSize, [6] ngStarter, [7] negTableLength,
     *            [8] isInference
     * intArrays: [0] syn1 row indices, [1] HS codes
     * realArguments: [1] RNG seed (alpha presumably in [0], consumed by callees —
     *            confirm against HierarchicSoftmax/NegativeSampling).
     *
     * Accumulates the error gradient in neu1e across the HS and NS rounds, then
     * applies it to syn0 (training) or to inferenceVector (inference).
     */
    aggregate_def void executeAggregate(T **arguments, int numArguments, Nd4jLong **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
        int syn0Row = indexArguments[0];
        int vectorLength = indexArguments[1];
        int hsRounds = indexArguments[2];
        int ngRounds = indexArguments[3];
        int expLength = indexArguments[4];
        int vocabSize = indexArguments[5];
        int ngStarter = indexArguments[6];
        int negTableLength = indexArguments[7];
        int isInference = indexArguments[8];

        // Per-call gradient accumulator, zero-initialized.
        auto neu1e = new T[vectorLength];
        std::memset(neu1e, 0, sizeof(T) * vectorLength);

        T *args[4];
        int idxArgs[4];

        args[1] = arguments[1]; // syn1
        args[2] = arguments[2]; // expTable
        args[3] = neu1e;

        idxArgs[0] = vectorLength; // vectorLength
        idxArgs[1] = expLength; // expLength
        idxArgs[3] = isInference;

        T *syn1Neg = arguments[3];
        T *negTable = arguments[4];
        T *inferenceVector = arguments[5];

        // In inference mode we train against the caller-provided inference vector
        // instead of a row of syn0.
        T *syn0 = isInference == 1 ? inferenceVector : arguments[0] + (syn0Row * vectorLength);
        args[0] = syn0; // syn0

        int *idxSyn1 = intArrays[0];
        int *codes = intArrays[1];

        auto next_random = static_cast<unsigned long long>(realArguments[1]);

        // Hierarchical-softmax rounds: one call per code point along the Huffman path.
        // NOTE(review): idxArgs has 4 entries but 5 is passed as numIndexArguments
        // (the CUDA path passes 3) — callee appears to read only indices 0-3; confirm.
        if (hsRounds > 0) {
            for (int r = 0; r < hsRounds; r++) {
                args[1] = arguments[1] + (idxSyn1[r] * vectorLength); // syn1 row
                idxArgs[2] = codes[r]; // code for row
                HierarchicSoftmax<T>::executeAggregate(args, 4, nullptr, 0, idxArgs, 5, nullptr, 0, realArguments, 1);
            }
        }

        // Negative-sampling rounds: round 0 is the positive example (ngStarter,
        // code 1); subsequent rounds draw negatives from negTable with a linear
        // congruential generator, skipping accidental hits on the positive target.
        int target = ngStarter;
        if (ngRounds > 0) {
            for (int r = 0; r < ngRounds + 1; r++) {
                if (r == 0) {
                    idxArgs[2] = 1;
                } else {
                    next_random = next_random * (unsigned long long) 25214903917 + 11;
                    target = negTable[(next_random >> 16) % negTableLength];
                    if (target <= 0 || target >= vocabSize) target = next_random % (vocabSize - 1) + 1;
                    if (target == ngStarter)
                        continue;
                    idxArgs[2] = 0;
                }

                args[1] = syn1Neg + (target * vectorLength); // syn1Neg instead of syn1
                NegativeSampling<T>::executeAggregate(args, 4, nullptr, 0, idxArgs, 5, nullptr, 0, realArguments, 1);
            }
        }

        // Apply the accumulated gradient.
        if (!isInference) {
#pragma omp simd
            for (int x = 0; x < vectorLength; x++) {
                syn0[x] += neu1e[x];
            }
        } else {
#pragma omp simd
            for (int x = 0; x < vectorLength; x++) {
                inferenceVector[x] += neu1e[x];
            }
        }

        delete[] neu1e;
    }

#ifdef __CUDACC__
    /**
     * Device variant of the skip-gram step for one block. Scalars and shared
     * argument arrays live in __shared__ memory and are populated by thread 0;
     * neu1e lives in the dynamically-sized extern shared buffer. All threads
     * must reach every __syncthreads(), hence the "continue" for a skipped
     * negative target is taken only after the barrier.
     */
    aggregate_def void executeAggregateCuda(T **arguments, int numArguments, Nd4jLong **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
        __shared__ int syn0Row;
        __shared__ int vectorLength;
        __shared__ int hsRounds;
        __shared__ int ngRounds;
        __shared__ int expLength;
        __shared__ int vocabSize;
        __shared__ int ngStarter;
        __shared__ int negTableLength;
        __shared__ int isInference;

        __shared__ T *neu1e;

        __shared__ T *args[4];
        __shared__ int idxArgs[4];

        __shared__ unsigned long long next_random;
        __shared__ T *negTable;
        T *syn1Neg = arguments[3];
        __shared__ T *inferenceVector;

        // Thread 0 unpacks all scalar arguments into shared memory.
        if (threadIdx.x == 0) {
            extern __shared__ unsigned char shmem[];
            neu1e = (T *) shmem;

            syn0Row = indexArguments[0];
            vectorLength = indexArguments[1];
            hsRounds = indexArguments[2];
            ngRounds = indexArguments[3];
            expLength = indexArguments[4];
            vocabSize = indexArguments[5];
            ngStarter = indexArguments[6];
            negTableLength = indexArguments[7];
            isInference = indexArguments[8];

            inferenceVector = arguments[5];

            next_random = (unsigned long long) realArguments[1];

            args[0] = isInference == 1 ? inferenceVector : arguments[0] + (syn0Row * vectorLength); // syn0
            args[1] = arguments[1]; // syn1
            args[2] = arguments[2]; // expTable
            args[3] = neu1e;

            negTable = arguments[4];

            idxArgs[0] = vectorLength; // vectorLength
            idxArgs[1] = expLength; // expLength
            idxArgs[3] = isInference;
        }
        __syncthreads();

        T *syn0 = isInference ? inferenceVector : arguments[0] + (syn0Row * vectorLength);

        // Zero the shared gradient accumulator cooperatively.
        for (int i = threadIdx.x; i < vectorLength; i += blockDim.x) {
            neu1e[i] = (T) 0.0f;
        }

        int *idxSyn1 = intArrays[0];
        int *codes = intArrays[1];

        // Hierarchical-softmax rounds; thread 0 selects the syn1 row and code,
        // all threads join the cooperative callee after the barrier.
        for (int r = 0; r < hsRounds; r++) {
            if (threadIdx.x == 0) {
                args[1] = arguments[1] + (idxSyn1[r] * vectorLength); // syn1 row
                idxArgs[2] = codes[r]; // code for row
            }
            __syncthreads();

            HierarchicSoftmax<T>::executeAggregateCuda(args, 4, nullptr, 0, idxArgs, 3, nullptr, 0, realArguments, 1);
        }
        __syncthreads();

        __shared__ int target;
        if (ngRounds > 0)
            for (int r = 0; r < ngRounds + 1; r++) {
                if (threadIdx.x == 0) {
                    if (r == 0) {
                        // this line isn't a mistake
                        target = ngStarter;
                        idxArgs[2] = 1;
                    } else {
                        // Unlike the host path, the RNG stream is perturbed by
                        // blockIdx.x so concurrent blocks draw different negatives.
                        next_random = next_random * (unsigned long long)25214903917 + 11 + blockIdx.x;
                        target = negTable[(next_random >> 16) % negTableLength];
                        if (target <= 0 || target >= vocabSize) target = next_random % (vocabSize - 1) + 1;
                        idxArgs[2] = 0;
                    }

                    args[1] = syn1Neg + (target * vectorLength);
                }
                __syncthreads();

                // we put it here, to make sure all threads pick up continue call
                if (r != 0 && target == ngStarter)
                    continue;

                NegativeSampling<T>::executeAggregateCuda(args, 4, nullptr, 0, idxArgs, 3, nullptr, 0, realArguments, 1);
            }

        // final axpy with 1.0f as alpha
        if (!isInference)
            for (int x = threadIdx.x; x < vectorLength; x += blockDim.x) {
                syn0[x] += neu1e[x];
            }
        else
            for (int x = threadIdx.x; x < vectorLength; x += blockDim.x) {
                inferenceVector[x] += neu1e[x];
            }
    }
#endif
};
template<typename T>
class CBOW {
public:
    /**
     * Word2Vec CBOW training/inference step for one context window.
     *
     * arguments: [0] syn0, [1] syn1 (HS), [2] expTable, [3] syn1Neg (NS),
     *            [4] negTable, [5] inferenceVector
     * indexArguments: [0] vectorLength, [1] hsRounds, [2] ngRounds, [3] expLength,
     *            [4] vocabSize, [5] ngStarter, [6] negTableLength,
     *            [7] idxSyn0Length, [8] initialIdx (unused here), [9] numLabels,
     *            [10] trainWords, [11] isInference
     * intArrays: [0] context-word syn0 rows, [1] syn1 rows, [2] HS codes
     * realArguments: [1] RNG seed (remaining entries consumed by callees).
     *
     * Builds the averaged context vector neu1, runs HS and/or NS rounds that
     * accumulate the gradient in neu1e, then propagates neu1e back to the
     * context rows of syn0 (training) or to inferenceVector (inference).
     */
    aggregate_def void executeAggregate(T **arguments, int numArguments, Nd4jLong **shapeArguments, int numShapeArguments,
                                        int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays,
                                        T *realArguments, int numRealArguments) {
        int vectorLength = indexArguments[0];
        int hsRounds = indexArguments[1];
        int ngRounds = indexArguments[2];
        int expLength = indexArguments[3];
        int vocabSize = indexArguments[4];
        int ngStarter = indexArguments[5];
        int negTableLength = indexArguments[6];
        int idxSyn0Length = indexArguments[7];
        //int initialIdx = indexArguments[8];
        int numLabels = indexArguments[9];
        int trainWords = indexArguments[10];
        int isInference = indexArguments[11];

        int *idxSyn0 = intArrays[0];
        int *idxSyn1 = intArrays[1];
        int *codes = intArrays[2];

        // neu1: averaged context vector; neu1e: gradient accumulator.
        T *neu1 = new T[vectorLength];
        T *neu1e = new T[vectorLength];
        std::memset(neu1, 0, sizeof(T) * vectorLength);
        std::memset(neu1e, 0, sizeof(T) * vectorLength);

        T *syn0 = arguments[0];
        T *syn1 = arguments[1];
        T *expTable = arguments[2];
        T *syn1Neg = arguments[3];
        T *negTable = arguments[4];
        T *inferenceVector = arguments[5];

        T *args[4];
        int idxArgs[4];

        idxArgs[0] = vectorLength; // vectorLength
        idxArgs[1] = expLength; // expLength
        idxArgs[3] = isInference;

        unsigned long long next_random = (unsigned long long) realArguments[1];

        // building neu1 for current window
        for (int c = 0; c < idxSyn0Length; c++) {
            T *syn0word = syn0 + (idxSyn0[c] * vectorLength);
#pragma omp simd
            for (int i = 0; i < vectorLength; i++) {
                neu1[i] += syn0word[i];
            }
        }

        // for inference we use additional inference vector
        if (isInference) {
#pragma omp simd
            for (int i = 0; i < vectorLength; i++) {
                neu1[i] += inferenceVector[i];
            }
        }

        // average neu1 (the +isInference accounts for the extra vector added above)
        if (idxSyn0Length > 0) {
#pragma omp simd
            for (int i = 0; i < vectorLength; i++) {
                neu1[i] /= idxSyn0Length + isInference;
            }
        }

        args[0] = neu1;
        args[2] = expTable;
        args[3] = neu1e;

        // Hierarchical-softmax rounds against the averaged context vector.
        if (hsRounds > 0)
            for (int i = 0; i < hsRounds; i++) {
                args[1] = syn1 + (idxSyn1[i] * vectorLength);
                idxArgs[2] = codes[i];
                HierarchicSoftmax<T>::executeAggregate((T **)args, 4, nullptr, 0, idxArgs, 3, nullptr, 0, realArguments, 2);
            }

        // Negative sampling: round 0 is the positive target (code 1), the rest
        // are negatives drawn from negTable, skipping hits on the positive.
        int target = ngStarter;
        if (ngRounds > 0)
            for (int i = 0; i < ngRounds + 1; i++) {
                if (i == 0) {
                    idxArgs[2] = 1;
                } else {
                    next_random = next_random * (unsigned long long) 25214903917 + 11;
                    target = negTable[(next_random >> 16) % negTableLength];
                    if (target <= 0 || target >= vocabSize) target = next_random % (vocabSize - 1) + 1;
                    if (target == ngStarter)
                        continue;
                    idxArgs[2] = 0;
                }

                args[1] = syn1Neg + (target * vectorLength); // syn1Neg instead of syn1
                NegativeSampling<T>::executeAggregate((T **)args, 4, nullptr, 0, idxArgs, 3, nullptr, 0, realArguments, 2);
            }

        // if we don't train words - we skip start of idxSyn0
        int starter = trainWords == 1 ? 0 : idxSyn0Length - numLabels;

        // propagate neu1e -> syn0
        if (!isInference) {
            for (int c = starter; c < idxSyn0Length; c++) {
                T *syn0word = arguments[0] + (idxSyn0[c] * vectorLength);
#pragma omp simd
                for (int i = 0; i < vectorLength; i++) {
                    syn0word[i] += neu1e[i];
                }
            }
        } else {
#pragma omp simd
            for (int i = 0; i < vectorLength; i++) {
                inferenceVector[i] += neu1e[i];
            }
        }

        delete[] neu1;
        delete[] neu1e;
    }

#ifdef __CUDACC__
    /**
     * Device variant of the CBOW step for one block. Scalars and argument
     * arrays are staged in __shared__ memory by thread 0; neu1 and neu1e share
     * the dynamically-sized extern shared buffer (neu1e starts at offset
     * vectorLength). Barriers keep all threads aligned across the cooperative
     * HS/NS calls; a skipped negative target "continue"s only after the barrier.
     */
    aggregate_def void executeAggregateCuda(T **arguments, int numArguments, Nd4jLong **shapeArguments, int numShapeArguments,
                                            int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays,
                                            T *realArguments, int numRealArguments) {
        __shared__ int vectorLength;
        __shared__ int hsRounds;
        __shared__ int ngRounds;
        __shared__ int expLength;
        __shared__ int vocabSize;
        __shared__ int ngStarter;
        __shared__ int negTableLength;
        __shared__ int idxSyn0Length;
        __shared__ int initialIdx;   // NOTE(review): assigned below but never read
        __shared__ int numLabels;
        __shared__ int trainWords;
        __shared__ int isInference;

        int *idxSyn0 = intArrays[0];
        int *idxSyn1 = intArrays[1];
        int *codes = intArrays[2];

        __shared__ T *neu1;
        __shared__ T *neu1e;

        // NOTE(review): declared with 5 slots here but 4 on the host path; only
        // indices 0-3 are written/used.
        __shared__ T *args[5];
        __shared__ int idxArgs[4];

        T *syn0 = arguments[0];
        T *syn1 = arguments[1];
        //T *expTable = arguments[2];
        T *syn1Neg = arguments[3];
        T *negTable = arguments[4];
        T *inferenceVector = arguments[5];

        // Thread 0 unpacks scalars and carves neu1/neu1e out of shared memory.
        if (threadIdx.x == 0) {
            vectorLength = indexArguments[0];
            hsRounds = indexArguments[1];
            ngRounds = indexArguments[2];
            expLength = indexArguments[3];
            vocabSize = indexArguments[4];
            ngStarter = indexArguments[5];
            negTableLength = indexArguments[6];
            idxSyn0Length = indexArguments[7];
            initialIdx = indexArguments[8];
            numLabels = indexArguments[9];
            trainWords = indexArguments[10];
            isInference = indexArguments[11];

            extern __shared__ unsigned char shmem[];
            neu1 = (T *) shmem;
            neu1e = neu1 + vectorLength;

            args[0] = neu1;
            args[2] = arguments[2]; //expTable
            args[3] = neu1e;

            idxArgs[0] = vectorLength; // vectorLength
            idxArgs[1] = expLength; // expLength
            idxArgs[3] = isInference;
        }
        __syncthreads();

        // Cooperative zeroing of both shared buffers.
        for (int i = threadIdx.x; i < vectorLength; i += blockDim.x) {
            neu1[i] = (T) 0.0f;
            neu1e[i] = (T) 0.0f;
        }

        unsigned long long next_random = (unsigned long long) realArguments[1];

        // Sum the context rows into neu1.
        for (int c = 0; c < idxSyn0Length; c++) {
            T *syn0word = syn0 + (idxSyn0[c] * vectorLength);
            for (int i = threadIdx.x; i < vectorLength; i += blockDim.x) {
                neu1[i] += syn0word[i];
            }
        }

        if (isInference)
            for (int i = threadIdx.x; i < vectorLength; i += blockDim.x) {
                neu1[i] += inferenceVector[i];
            }

        // average neu1
        // NOTE(review): "+ +" below is a unary-plus typo, behaviorally identical
        // to "idxSyn0Length + isInference" (matches the host path).
        if (idxSyn0Length > 0) {
            for (int i = threadIdx.x; i < vectorLength; i += blockDim.x) {
                neu1[i] /= idxSyn0Length + + isInference;
            }
        }
        __syncthreads();

        // Hierarchical-softmax rounds.
        if (hsRounds > 0)
            for (int i = 0; i < hsRounds; i++) {
                if (threadIdx.x == 0) {
                    args[1] = syn1 + (idxSyn1[i] * vectorLength);
                    idxArgs[2] = codes[i];
                }
                __syncthreads();

                HierarchicSoftmax<T>::executeAggregateCuda(args, 4, nullptr, 0, idxArgs, 3, nullptr, 0, realArguments, 2);
            }

        // Negative-sampling rounds.
        __shared__ int target;
        if (ngRounds > 0)
            for (int i = 0; i < ngRounds + 1; i++) {
                if (threadIdx.x == 0) {
                    if (i == 0) {
                        target = ngStarter;
                    } else {
                        next_random = next_random * (unsigned long long) 25214903917 + 11;
                        target = negTable[(next_random >> 16) % negTableLength];
                        if (target <= 0 || target >= vocabSize) target = next_random % (vocabSize - 1) + 1;
                    }

                    args[1] = syn1Neg + (target * vectorLength); // syn1Neg instead of syn1
                    idxArgs[2] = i == 0 ? 1 : 0;
                }
                __syncthreads();

                if (i != 0 && target == ngStarter)
                    continue;

                NegativeSampling<T>::executeAggregateCuda(args, 4, nullptr, 0, idxArgs, 3, nullptr, 0, realArguments, 2);
            }

        // if we don't train words - we skip start of idxSyn0
        int starter = trainWords == 1 ? 0 : idxSyn0Length - numLabels;

        if (!isInference)
            for (int c = starter; c < idxSyn0Length; c++) {
                T *syn0word = arguments[0] + (idxSyn0[c] * vectorLength);
                for (int i = threadIdx.x; i < vectorLength; i += blockDim.x) {
                    syn0word[i] += neu1e[i];
                }
            }
        else {
            for (int i = threadIdx.x; i < vectorLength; i += blockDim.x) {
                inferenceVector[i] += neu1e[i];
            }
        }
    }
#endif
};
}
#endif //LIBND4J_AGGREGATE_OPS_H
|
GB_binop__times_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__times_fp64
// A.*B function (eWiseMult): GB_AemultB__times_fp64
// A*D function (colscale): GB_AxD__times_fp64
// D*A function (rowscale): GB_DxB__times_fp64
// C+=B function (dense accum): GB_Cdense_accumB__times_fp64
// C+=b function (dense accum): GB_Cdense_accumb__times_fp64
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__times_fp64
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__times_fp64
// C=scalar+B GB_bind1st__times_fp64
// C=scalar+B' GB_bind1st_tran__times_fp64
// C=A+scalar GB_bind2nd__times_fp64
// C=A'+scalar GB_bind2nd_tran__times_fp64
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (aij * bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x * y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_FP64 || GxB_NO_TIMES_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; the actual loop lives in the
// included template, specialized by the GB_* macros above (TIMES, fp64).
// Note: unlike the other wrappers in this file, this one has no GB_DISABLE
// guard and returns void.
void GB_Cdense_ewise3_accum__times_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense (no accumulation); the loop lives
// in the included template. Returns GrB_NO_VALUE when this operator/type
// combination is compiled out via GB_DISABLE.
GrB_Info GB_Cdense_ewise3_noaccum__times_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C, using the
// slicing workspaces (kfirst/klast/pstart per task) computed by the caller.
GrB_Info GB_Cdense_accumB__times_fp64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed type-erased via p_bwork) into the
// dense matrix C. Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB_Cdense_accumb__times_fp64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // FIX: removed a second, unreachable "return (GrB_SUCCESS) ;" that
    // followed the block above (dead code emitted by the code generator;
    // the block returns unconditionally).
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D; the loop lives in
// the included colscale template, writing through the typed alias Cx.
GrB_Info GB_AxD__times_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *GB_RESTRICT Cx = (double *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D; the loop lives in
// the included rowscale template, writing through the typed alias Cx.
GrB_Info GB_DxB__times_fp64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *GB_RESTRICT Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Workspace cleanup used by the eWiseAdd/eWiseMult wrappers below: frees the
// per-matrix ek_slice workspaces (each pointer set may be NULL).
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}

// eWiseAdd: C = A+B or C<M> = A+B (set-union pattern); the algorithm lives in
// the included add template, which allocates the slice workspaces freed by
// GB_FREE_ALL above.
GrB_Info GB_AaddB__times_fp64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (set-intersection pattern); the algorithm
// lives in the included emult template, workspaces freed via GB_FREE_ALL.
GrB_Info GB_AemultB__times_fp64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x * Bx [p] for every entry present in B's bitmap Bb, with the
// scalar x bound as the first operand. Cx and Bx may alias.
GrB_Info GB_bind1st__times_fp64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((double *) x_input)) ;
    double *Cx = (double *) Cx_output ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // apply the operator only where the bitmap marks an entry
        if (GBB (Bb, p))
        {
            Cx [p] = (x * Bx [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] * y for every entry present in A's bitmap Ab, with the
// scalar y bound as the second operand. Cx and Ax may alias.
GrB_Info GB_bind2nd__times_fp64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((double *) y_input)) ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // apply the operator only where the bitmap marks an entry
        if (GBB (Ab, p))
        {
            Cx [p] = (Ax [p] * y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = Ax [pA] ; \
    Cx [pC] = (x * aij) ; \
}

// C = op (x, A'): transpose A and apply TIMES with the scalar x bound first.
// The transpose loop lives in the included template and uses GB_CAST_OP above.
GrB_Info GB_bind1st_tran__times_fp64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef GB_ATYPE
    #define GB_ATYPE \
        double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = Ax [pA] ; \
    Cx [pC] = (aij * y) ; \
}

// C = op (A', y): transpose A and apply TIMES with the scalar y bound second.
// The transpose loop lives in the included template and uses GB_CAST_OP above.
GrB_Info GB_bind2nd_tran__times_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
8393c2f5_gcc_so4_prot.c | #define _POSIX_C_SOURCE 200809L
#define START_TIMER(S) struct timeval start_ ## S , end_ ## S ; gettimeofday(&start_ ## S , NULL);
#define STOP_TIMER(S,T) gettimeofday(&end_ ## S, NULL); T->S += (double)(end_ ## S .tv_sec-start_ ## S.tv_sec)+(double)(end_ ## S .tv_usec-start_ ## S .tv_usec)/1000000;
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include "omp.h"
/* Generic carrier for an array argument passed into the generated kernel:
 * the raw buffer plus per-dimension metadata arrays. Only `data` and `size`
 * are referenced by Kernel() below; the remaining arrays (npsize, dsize,
 * hsize, hofs, oofs) look like padded/domain/halo sizes and halo/owned
 * offsets emitted by the code generator — TODO confirm against the
 * generating Devito version. */
struct dataobj
{
    void *restrict data;   /* underlying contiguous buffer */
    int * size;            /* per-dimension allocated extents (used in casts) */
    int * npsize;
    int * dsize;
    int * hsize;
    int * hofs;
    int * oofs;
} ;
/* Wall-clock accumulators (seconds) filled by the START_TIMER/STOP_TIMER
 * macros defined at the top of this file. */
struct profiler
{
    double section0;   /* time spent in the single generated loop nest */
} ;
/* Generated time-stepping kernel: advances the wavefield `usol` from time_m to
 * time_M over the 3-D domain [x_m..x_M] x [y_m..y_M] x [z_m..z_M], then
 * injects masked point sources into the updated time level. Timing for the
 * whole loop nest is accumulated into timers->section0. Always returns 0. */
int Kernel(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, const float dt, const float h_x, const float h_y, const float h_z, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict usol_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads_nonaffine, struct profiler * timers)
{
  /* Reinterpret the flat buffers as variably-modified multi-dimensional
   * arrays using the extents carried in each dataobj. */
  int (*restrict block_sizes) __attribute__ ((aligned (64))) = (int (*)) block_sizes_vec->data;
  float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data;
  int (*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__ ((aligned (64))) = (int (*)[nnz_sp_source_mask_vec->size[1]]) nnz_sp_source_mask_vec->data;
  float (*restrict save_src)[save_src_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[save_src_vec->size[1]]) save_src_vec->data;
  int (*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__ ((aligned (64))) = (int (*)[source_id_vec->size[1]][source_id_vec->size[2]]) source_id_vec->data;
  float (*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[source_mask_vec->size[1]][source_mask_vec->size[2]]) source_mask_vec->data;
  int (*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__ ((aligned (64))) = (int (*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]]) sp_source_mask_vec->data;
  float (*restrict usol)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]]) usol_vec->data;
  float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data;

  /* Flush denormal numbers to zero in hardware */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);

  /* Cache-block size knobs from the runtime; NOTE(review): read but never
   * used in this kernel — presumably consumed by blocked variants of the
   * generated code. Confirm before removing. */
  int xb_size = block_sizes[0];
  int yb_size = block_sizes[1];
  int x0_blk0_size = block_sizes[2];
  int y0_blk0_size = block_sizes[3];

  /* Time loop with three rotating buffers: t0 = current, t1 = previous,
   * t2 = next (all indices mod 3). */
  for (int time = time_m, t0 = (time)%(3), t1 = (time + 2)%(3), t2 = (time + 1)%(3); time <= time_M; time += 1, t0 = (time)%(3), t1 = (time + 2)%(3), t2 = (time + 1)%(3))
  {
    /* Begin section0 */
    START_TIMER(section0)
    #pragma omp parallel num_threads(nthreads_nonaffine)
    {
      /* Dynamic schedule: roughly a third of the x-range per thread chunk. */
      int chunk_size = (int)(fmax(1, (1.0F/3.0F)*(x_M - x_m + 1)/nthreads_nonaffine));
      #pragma omp for collapse(1) schedule(dynamic,chunk_size)
      for (int x = x_m; x <= x_M; x += 1)
      {
        #pragma omp simd aligned(damp,nnz_sp_source_mask,save_src,source_id,source_mask,sp_source_mask,usol,vp:32)
        for (int y = y_m; y <= y_M; y += 1)
        {
          for (int z = z_m; z <= z_M; z += 1)
          {
            /* Damped wave-equation update. The (-1/12, 4/3, -5/2) coefficient
             * pattern appears to be a 4th-order central second derivative in
             * each axis; the +4 offsets index past the halo layers. */
            float r9 = -2.5F*usol[t0][x + 4][y + 4][z + 4];
            float r8 = 1.0/dt;
            float r7 = 1.0/(dt*dt);
            float r6 = 1.0/(vp[x + 4][y + 4][z + 4]*vp[x + 4][y + 4][z + 4]);
            usol[t2][x + 4][y + 4][z + 4] = (r6*(-r7*(-2.0F*usol[t0][x + 4][y + 4][z + 4] + usol[t1][x + 4][y + 4][z + 4])) + r8*(damp[x + 1][y + 1][z + 1]*usol[t0][x + 4][y + 4][z + 4]) + (r9 - 8.33333333e-2F*(usol[t0][x + 4][y + 4][z + 2] + usol[t0][x + 4][y + 4][z + 6]) + 1.33333333F*(usol[t0][x + 4][y + 4][z + 3] + usol[t0][x + 4][y + 4][z + 5]))/((h_z*h_z)) + (r9 - 8.33333333e-2F*(usol[t0][x + 4][y + 2][z + 4] + usol[t0][x + 4][y + 6][z + 4]) + 1.33333333F*(usol[t0][x + 4][y + 3][z + 4] + usol[t0][x + 4][y + 5][z + 4]))/((h_y*h_y)) + (r9 - 8.33333333e-2F*(usol[t0][x + 2][y + 4][z + 4] + usol[t0][x + 6][y + 4][z + 4]) + 1.33333333F*(usol[t0][x + 3][y + 4][z + 4] + usol[t0][x + 5][y + 4][z + 4]))/((h_x*h_x)))/(r6*r7 + r8*damp[x + 1][y + 1][z + 1]);
          }
          /* Inject the (sparse) sources at this (x, y) column: zind selects the
           * z locations listed in sp_source_mask, scaled by source_mask. */
          int sp_zi_M = nnz_sp_source_mask[x][y] - 1;
          for (int sp_zi = sp_zi_m; sp_zi <= sp_zi_M; sp_zi += 1)
          {
            int zind = sp_source_mask[x][y][sp_zi];
            float r0 = save_src[time][source_id[x][y][zind]]*source_mask[x][y][zind];
            usol[t2][x + 4][y + 4][zind + 4] += r0;
          }
        }
      }
    }
    STOP_TIMER(section0,timers)
    /* End section0 */
  }

  return 0;
}
/* Backdoor edit at Wed Jan 20 16:52:34 2021*/
|
DRB062-matrixvector2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Matrix-vector multiplication: inner level parallelization.
*/
#include <stdio.h>   /* printf — used by print(); previously relied on an implicit declaration */
#include <omp.h>

#define N 1000
double a[1000][1000];
double v[1000];
double v_out[1000];
/* Initialize the globals: a[i][j] = i*j, and v[i] = v_out[i] = i*1000.
 *
 * BUG FIX: `j` was only `private` in the inner worksharing loop, so reading it
 * afterwards (for v_out[i] and v[i]) yielded an unspecified value — per the
 * OpenMP spec, the original list item of a private variable has an
 * unspecified value after the region. Declaring it `lastprivate` restores the
 * sequential semantics (j == 1000 after the loop). The unused local `k` was
 * removed. Always returns 0. */
int init()
{
  int i;
  int j;
#pragma omp parallel for private (i,j)
  for (i = 0; i <= 999; i += 1) {
#pragma omp parallel for lastprivate (j)
    for (j = 0; j <= 999; j += 1) {
      a[i][j] = (i * j);
    }
    /* j == 1000 here, matching the sequential execution */
    v_out[i] = (i * j);
    v[i] = (i * j);
  }
  return 0;
}
void mv()
{
int i;
int j;
#pragma omp parallel for private (i,j)
for (i = 0; i <= 999; i += 1) {
float sum = 0.0;
#pragma omp parallel for private (j) reduction (+:sum)
for (j = 0; j <= 999; j += 1) {
sum += a[i][j] * v[j];
}
v_out[i] = sum;
}
}
int print()
{
int i;
int j;
int k;
for (i = 0; i <= 999; i += 1) {
for (j = 0; j <= 999; j += 1) {
printf("%lf\n",a[i][j]);
}
printf("%lf\n",v_out[i]);
printf("%lf\n",v[i]);
}
return 0;
}
int main()
{
init();
mv();
print();
return 0;
}
|
Efficient_RANSAC.h | // Copyright (c) 2015 INRIA Sophia-Antipolis (France).
// All rights reserved.
//
// This file is part of CGAL (www.cgal.org).
//
// $URL: https://github.com/CGAL/cgal/blob/releases/CGAL-5.0.2/Shape_detection/include/CGAL/Shape_detection/Efficient_RANSAC/Efficient_RANSAC.h $
// $Id: Efficient_RANSAC.h 254d60f 2019-10-19T15:23:19+02:00 Sébastien Loriot
// SPDX-License-Identifier: GPL-3.0-or-later OR LicenseRef-Commercial
//
//
// Author(s) : Sven Oesau, Yannick Verdie, Clément Jamin, Pierre Alliez
//
#ifndef CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H
#define CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H
#include <CGAL/license/Shape_detection.h>
#include <CGAL/Random.h>
#include <CGAL/Shape_detection/Efficient_RANSAC/Octree.h>
#include <CGAL/Shape_detection/Efficient_RANSAC/Shape_base.h>
#include <CGAL/Shape_detection/Efficient_RANSAC/Plane.h>
// for octree ------------------------------
#include <boost/iterator/filter_iterator.hpp>
#include <CGAL/bounding_box.h>
#include <CGAL/Iterator_range.h>
//----------
#include <vector>
#include <cmath>
#include <limits>
#include <fstream>
#include <sstream>
#include <functional>
// boost --------------
#include <CGAL/boost/iterator/counting_iterator.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/make_shared.hpp>
//---------------------
namespace CGAL {
namespace Shape_detection {
/*!
\ingroup PkgShapeDetectionRANSAC
\brief Shape detection algorithm based on the RANSAC method.
Given a point set in 3D space with unoriented normals, sampled on surfaces,
this class enables to detect subsets of connected points lying on the surface of primitive shapes.
Each input point is assigned to either none or at most one detected primitive
shape. The implementation follows \cgalCite{schnabel2007efficient}.
\tparam Traits must be a model of `EfficientRANSACTraits`.
*/
template <class Traits>
class Efficient_RANSAC {
public:
/// \cond SKIP_IN_MANUAL
struct Filter_unassigned_points {
Filter_unassigned_points() : m_shape_index(dummy) {}
Filter_unassigned_points(const std::vector<int> &shapeIndex)
: m_shape_index(shapeIndex) {}
bool operator()(std::size_t x) {
if (x < m_shape_index.size())
return m_shape_index[x] == -1;
else return true; // to prevent infinite incrementing
}
const std::vector<int>& m_shape_index;
std::vector<int> dummy;
};
typedef boost::filter_iterator<Filter_unassigned_points,
boost::counting_iterator<std::size_t, boost::use_default, std::ptrdiff_t> > Point_index_iterator;
///< iterator for indices of points.
/// \endcond
/// \name Types
/// @{
/// \cond SKIP_IN_MANUAL
typedef typename Traits::Input_range::iterator Input_iterator;
typedef typename Traits::FT FT; ///< number type.
typedef typename Traits::Point_3 Point; ///< point type.
typedef typename Traits::Vector_3 Vector; ///< vector type.
/// \endcond
typedef typename Traits::Input_range Input_range;
///< Model of the concept `Range` with random access iterators, providing input points and normals
/// through the following two property maps.
typedef typename Traits::Point_map Point_map;
///< Property map to access the location of an input point.
typedef typename Traits::Normal_map Normal_map;
///< Property map to access the unoriented normal of an input point.
typedef Shape_base<Traits> Shape; ///< Shape type.
typedef Plane<Traits> Plane_shape; ///< %Plane shape type.
#ifdef DOXYGEN_RUNNING
typedef unspecified_type Shape_range;
///< `Iterator_range` with a bidirectional constant iterator type with value type `boost::shared_ptr<Shape>`.
typedef unspecified_type Plane_range;
///< `Iterator_range` with a bidirectional constant iterator type with value type `boost::shared_ptr<Plane_shape>`.
#else
struct Shape_range : public Iterator_range<
typename std::vector<boost::shared_ptr<Shape> >::const_iterator> {
typedef Iterator_range<
typename std::vector<boost::shared_ptr<Shape> >::const_iterator> Base;
Shape_range(boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > >
extracted_shapes) : Base(make_range(extracted_shapes->begin(),
extracted_shapes->end())), m_extracted_shapes(extracted_shapes) {}
private:
boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > >
m_extracted_shapes; // keeps a reference to the shape vector
};
struct Plane_range : public Iterator_range<
typename std::vector<boost::shared_ptr<Plane_shape> >::const_iterator> {
typedef Iterator_range<
typename std::vector<boost::shared_ptr<Plane_shape> >::const_iterator> Base;
Plane_range(boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > >
extracted_shapes) : Base(make_range(extracted_shapes->begin(),
extracted_shapes->end())), m_extracted_shapes(extracted_shapes) {}
private:
boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > >
m_extracted_shapes; // keeps a reference to the shape vector
};
#endif
#ifdef DOXYGEN_RUNNING
typedef unspecified_type Point_index_range;
///< `Iterator_range` with a bidirectional iterator with value type `std::size_t`
/// as indices into the input data that has not been assigned to a shape.
/// As this range class has no `size()` method, the method
/// `Efficient_RANSAC::number_of_unassigned_points()` is provided.
#else
typedef Iterator_range<Point_index_iterator>
Point_index_range;
#endif
/// @}
/// \name Parameters
/// @{
/*!
Parameters for the shape detection algorithm. They are explained in detail
in Section \ref Shape_detection_RANSACParameters of the User Manual.
*/
struct Parameters {
Parameters()
: probability((FT) 0.01)
, min_points((std::numeric_limits<std::size_t>::max)())
, epsilon(-1)
, normal_threshold((FT) 0.9)
, cluster_epsilon(-1)
{}
FT probability; ///< Probability to control search endurance. %Default value: 5%.
std::size_t min_points; ///< Minimum number of points of a shape. %Default value: 1% of total number of input points.
FT epsilon; ///< Maximum tolerance Euclidean distance from a point and a shape. %Default value: 1% of bounding box diagonal.
FT normal_threshold; ///< Maximum tolerance normal deviation from a point's normal to the normal on a shape at the projected point. %Default value: 0.9 (around 25 degrees).
FT cluster_epsilon; ///< Maximum distance between points to be considered connected. %Default value: 1% of bounding box diagonal.
};
/// @}
private:
typedef internal::Octree<internal::DirectPointAccessor<Traits> >
Direct_octree;
typedef internal::Octree<internal::IndexedPointAccessor<Traits> >
Indexed_octree;
//--------------------------------------------typedef
// Creates a function pointer for instancing shape instances.
template <class ShapeT>
static Shape *factory() {
return new ShapeT;
}
public:
/// \name Initialization
/// @{
/*!
Constructs an empty shape detection object.
*/
Efficient_RANSAC(Traits t = Traits())
: m_traits(t)
, m_direct_octrees(nullptr)
, m_global_octree(nullptr)
, m_num_subsets(0)
, m_num_available_points(0)
, m_num_total_points(0)
, m_valid_iterators(false)
{}
/*!
Releases all memory allocated by this instance including shapes.
*/
~Efficient_RANSAC() {
clear();
}
/*!
Retrieves the traits class.
*/
const Traits&
traits() const
{
return m_traits;
}
/*!
Retrieves the point property map.
*/
const Point_map& point_map() const { return m_point_pmap; }
/*!
Retrieves the normal property map.
*/
const Normal_map& normal() const { return m_normal_pmap; }
Input_iterator input_iterator_first() const
{
return m_input_iterator_first;
}
Input_iterator input_iterator_beyond() const
{
return m_input_iterator_beyond;
}
/*!
Sets the input data. The range must stay valid
until the detection has been performed and the access to the
results is no longer required. The data in the input is reordered by the methods
`detect()` and `preprocess()`. This function first calls `clear()`.
*/
void set_input(
Input_range& input_range,
///< Range of input data.
Point_map point_map = Point_map(),
///< Property map to access the position of an input point.
Normal_map normal_map = Normal_map()
///< Property map to access the normal of an input point.
) {
m_point_pmap = point_map;
m_normal_pmap = normal_map;
m_input_iterator_first = input_range.begin();
m_input_iterator_beyond = input_range.end();
clear();
m_extracted_shapes =
boost::make_shared<std::vector<boost::shared_ptr<Shape> > >();
m_num_available_points = m_num_total_points = std::distance(
m_input_iterator_first, m_input_iterator_beyond);
m_valid_iterators = true;
}
/*!
Registers the shape type `ShapeType` in the detection engine that must inherit from `Shape_base`.
For example, for registering a plane as detectable shape, you should call
`ransac.add_shape_factory< Shape_detection::Plane<Traits> >();`. Note
that if your call is within a template, you should add the `template`
keyword just before `add_shape_factory`:
`ransac.template add_shape_factory< Shape_detection::Plane<Traits> >();`.
*/
template <class Shape_type>
void add_shape_factory() {
m_shape_factories.push_back(factory<Shape_type>);
}
/*!
Constructs internal data structures required for the shape detection.
These structures only depend on the input data, i.e. the points and
normal vectors. This method is called by `detect()`, if it was not called
before by the user.
*/
bool preprocess() {
if (m_num_total_points == 0)
return false;
// Generation of subsets
m_num_subsets = (std::size_t)(std::max<std::ptrdiff_t>)((std::ptrdiff_t)
std::floor(std::log(double(m_num_total_points))/std::log(2.))-9, 2);
// SUBSET GENERATION ->
// approach with increasing subset sizes -> replace with octree later on
Input_iterator last = m_input_iterator_beyond - 1;
std::size_t remainingPoints = m_num_total_points;
m_available_octree_sizes.resize(m_num_subsets);
m_direct_octrees = new Direct_octree *[m_num_subsets];
for (int s = int(m_num_subsets) - 1;s >= 0;--s) {
std::size_t subsetSize = remainingPoints;
std::vector<std::size_t> indices(subsetSize);
if (s) {
subsetSize >>= 1;
for (std::size_t i = 0;i<subsetSize;i++) {
std::size_t index = get_default_random()(2);
index = index + (i<<1);
index = (index >= remainingPoints) ? remainingPoints - 1 : index;
indices[i] = index;
}
// move points to the end of the point vector
std::size_t j = subsetSize;
do {
j--;
typename std::iterator_traits<Input_iterator>::value_type
tmp = (*last);
*last = m_input_iterator_first[indices[std::size_t(j)]];
m_input_iterator_first[indices[std::size_t(j)]] = tmp;
last--;
} while (j > 0);
m_direct_octrees[s] = new Direct_octree(
m_traits, last + 1,
last + subsetSize + 1,
m_point_pmap, m_normal_pmap,
remainingPoints - subsetSize);
}
else
m_direct_octrees[0] = new Direct_octree(
m_traits, m_input_iterator_first,
m_input_iterator_first + (subsetSize),
m_point_pmap, m_normal_pmap,
0);
m_available_octree_sizes[s] = subsetSize;
m_direct_octrees[s]->createTree(m_options.cluster_epsilon);
remainingPoints -= subsetSize;
}
m_global_octree = new Indexed_octree(
m_traits, m_input_iterator_first, m_input_iterator_beyond,
m_point_pmap, m_normal_pmap);
m_global_octree->createTree(m_options.cluster_epsilon);
return true;
}
/// @}
/// \name Memory Management
/// @{
/*!
Removes all shape types registered for detection.
*/
void clear_shape_factories() {
m_shape_factories.clear();
}
/*!
Frees memory allocated for the internal search structures but keeps the detected shapes.
It invalidates the range retrieved using `unassigned_points()`.
*/
void clear_octrees() {
// If there is no data yet, there are no data structures.
if (!m_valid_iterators)
return;
if (m_global_octree) {
delete m_global_octree;
m_global_octree = nullptr;
}
if (m_direct_octrees) {
for (std::size_t i = 0;i<m_num_subsets;i++)
delete m_direct_octrees[i];
delete [] m_direct_octrees;
m_direct_octrees = nullptr;
}
m_num_subsets = 0;
}
/*!
Calls `clear_octrees()` and removes all detected shapes.
All internal structures are cleaned, including formerly detected shapes.
Thus iterators and ranges retrieved through `shapes()`, `planes()` and `indices_of_unassigned_points()`
are invalidated.
*/
void clear() {
// If there is no data yet, there are no data structures.
if (!m_valid_iterators)
return;
std::vector<int>().swap(m_shape_index);
m_extracted_shapes =
boost::make_shared<std::vector<boost::shared_ptr<Shape> > >();
m_num_available_points = m_num_total_points;
clear_octrees();
clear_shape_factories();
}
/// @}
/// \name Detection
/// @{
/*!
Performs the shape detection. Shape types considered during the detection
are those registered using `add_shape_factory()`.
\param options parameters for shape detection
\param callback can be omitted if the algorithm should be run
without any callback. It is called regularly when the algorithm
is running: the current advancement (between 0.0 and 1.0) is
passed as parameter. If it returns `true`, then the algorithm
continues its execution normally; if it returns `false`, the
algorithm is stopped. Note that this interruption may leave the
class in an invalid state.
\return `true` if shape types have been registered and
input data has been set. Otherwise, `false` is returned.
*/
bool detect(const Parameters &options = Parameters(),
const std::function<bool(double)>& callback
= std::function<bool(double)>())
{
m_options = options;
// No shape types for detection or no points provided, exit
if (m_shape_factories.size() == 0 ||
(m_input_iterator_beyond - m_input_iterator_first) == 0)
return false;
if (m_num_subsets == 0 || m_global_octree == 0) {
if (!preprocess())
return false;
}
if (callback && !callback(0.))
return false;
// Reset data structures possibly used by former search
m_extracted_shapes =
boost::make_shared<std::vector<boost::shared_ptr<Shape> > >();
m_num_available_points = m_num_total_points;
for (std::size_t i = 0;i<m_num_subsets;i++) {
m_available_octree_sizes[i] = m_direct_octrees[i]->size();
}
// Use bounding box diagonal as reference for default values
Bbox_3 bbox = m_global_octree->boundingBox();
FT bbox_diagonal = (FT) CGAL::sqrt(
(bbox.xmax() - bbox.xmin()) * (bbox.xmax() - bbox.xmin())
+ (bbox.ymax() - bbox.ymin()) * (bbox.ymax() - bbox.ymin())
+ (bbox.zmax() - bbox.zmin()) * (bbox.zmax() - bbox.zmin()));
// Epsilon or cluster_epsilon have been set by the user?
// If not, derive from bounding box diagonal
m_options.epsilon = (m_options.epsilon < 0)
? bbox_diagonal * (FT) 0.01 : m_options.epsilon;
m_options.cluster_epsilon = (m_options.cluster_epsilon < 0)
? bbox_diagonal * (FT) 0.01 : m_options.cluster_epsilon;
// Minimum number of points has been set?
m_options.min_points =
(m_options.min_points >= m_num_available_points) ?
(std::size_t)((FT)0.01 * m_num_available_points) :
m_options.min_points;
m_options.min_points = (m_options.min_points < 10) ? 10 : m_options.min_points;
// Initializing the shape index
m_shape_index.assign(m_num_available_points, -1);
// List of all randomly drawn candidates
// with the minimum number of points
std::vector<Shape *> candidates;
// Identifying minimum number of samples
std::size_t required_samples = 0;
for (std::size_t i = 0;i<m_shape_factories.size();i++) {
Shape *tmp = (Shape *) m_shape_factories[i]();
required_samples = (std::max<std::size_t>)(required_samples, tmp->minimum_sample_size());
delete tmp;
}
std::size_t first_sample; // first sample for RANSAC
FT best_expected = 0;
// number of points that have been assigned to a shape
std::size_t num_invalid = 0;
std::size_t generated_candidates = 0;
std::size_t failed_candidates = 0;
std::size_t limit_failed_candidates = (std::max)(std::size_t(10000),
std::size_t(m_input_iterator_beyond
- m_input_iterator_first)
/ std::size_t(100));
bool force_exit = false;
bool keep_searching = true;
do { // main loop
best_expected = 0;
if (keep_searching)
do {
// Generate candidates
//1. pick a point p1 randomly among available points
std::set<std::size_t> indices;
bool done = false;
do {
do
first_sample = get_default_random()(
static_cast<unsigned int>(m_num_available_points));
while (m_shape_index[first_sample] != -1);
done = m_global_octree->drawSamplesFromCellContainingPoint(
get(m_point_pmap,
*(m_input_iterator_first + first_sample)),
select_random_octree_level(),
indices,
m_shape_index,
required_samples);
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
} while (m_shape_index[first_sample] != -1 || !done);
generated_candidates++;
//add candidate for each type of primitives
for(typename std::vector<Shape *(*)()>::iterator it =
m_shape_factories.begin(); it != m_shape_factories.end(); it++) {
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
Shape *p = (Shape *) (*it)();
//compute the primitive and says if the candidate is valid
p->compute(indices,
m_input_iterator_first,
m_traits,
m_point_pmap,
m_normal_pmap,
m_options.epsilon,
m_options.normal_threshold);
if (p->is_valid()) {
improve_bound(p, m_num_available_points - num_invalid, 1, 500);
//evaluate the candidate
if(p->max_bound() >= m_options.min_points && p->score() > 0) {
if (best_expected < p->expected_value())
best_expected = p->expected_value();
candidates.push_back(p);
}
else {
failed_candidates++;
delete p;
}
}
else {
failed_candidates++;
delete p;
}
}
if (failed_candidates >= limit_failed_candidates)
{
force_exit = true;
}
keep_searching = (stop_probability(m_options.min_points,
m_num_available_points - num_invalid,
generated_candidates, m_global_octree->maxLevel())
> m_options.probability);
} while( !force_exit
&& stop_probability((std::size_t) best_expected,
m_num_available_points - num_invalid,
generated_candidates,
m_global_octree->maxLevel())
> m_options.probability
&& keep_searching);
// end of generate candidate
if (force_exit) {
break;
}
if (candidates.empty())
continue;
// Now get the best candidate in the current set of all candidates
// Note that the function sorts the candidates:
// the best candidate is always the last element of the vector
Shape *best_candidate =
get_best_candidate(candidates, m_num_available_points - num_invalid);
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
// If search is done and the best candidate is too small, we are done.
if (!keep_searching && best_candidate->m_score < m_options.min_points)
break;
if (!best_candidate)
continue;
best_candidate->m_indices.clear();
best_candidate->m_score =
m_global_octree->score(best_candidate,
m_shape_index,
FT(3) * m_options.epsilon,
m_options.normal_threshold);
best_expected = static_cast<FT>(best_candidate->m_score);
best_candidate->connected_component(best_candidate->m_indices,
m_options.cluster_epsilon);
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
// check score against min_points and clear out candidates if too low
if (best_candidate->indices_of_assigned_points().size() <
m_options.min_points)
{
if (!(best_candidate->indices_of_assigned_points().empty()))
for (std::size_t i = 0;i < candidates.size() - 1;i++) {
if (best_candidate->is_same(candidates[i])) {
delete candidates[i];
candidates[i] = nullptr;
}
}
candidates.back() = nullptr;
delete best_candidate;
best_candidate = nullptr;
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
// Trimming candidates list
std::size_t empty = 0, occupied = 0;
while (empty < candidates.size()) {
while (empty < candidates.size() && candidates[empty]) empty++;
if (empty >= candidates.size())
break;
if (occupied < empty)
occupied = empty + 1;
while (occupied < candidates.size() && !candidates[occupied])
occupied++;
if (occupied >= candidates.size())
break;
candidates[empty] = candidates[occupied];
candidates[occupied] = nullptr;
empty++;
occupied++;
}
candidates.resize(empty);
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
}
else
if (stop_probability((std::size_t) best_candidate->expected_value(),
(m_num_available_points - num_invalid),
generated_candidates,
m_global_octree->maxLevel())
<= m_options.probability) {
// Remove candidate from list
candidates.back() = nullptr;
//1. add best candidate to final result.
m_extracted_shapes->push_back(
boost::shared_ptr<Shape>(best_candidate));
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
//2. remove the points
const std::vector<std::size_t> &indices_points_best_candidate =
best_candidate->indices_of_assigned_points();
// update generated candidates to reflect removal of points
generated_candidates = std::size_t(std::pow (1.f - (indices_points_best_candidate.size() /
float(m_num_available_points - num_invalid)), 3.f)
* generated_candidates);
//2.3 Remove the points from the subtrees
for (std::size_t i = 0;i<indices_points_best_candidate.size();i++) {
m_shape_index[indices_points_best_candidate.at(i)] =
int(m_extracted_shapes->size()) - 1;
num_invalid++;
for (std::size_t j = 0;j<m_num_subsets;j++) {
if (m_direct_octrees[j] && m_direct_octrees[j]->m_root) {
std::size_t offset = m_direct_octrees[j]->offset();
if (offset <= indices_points_best_candidate.at(i) &&
(indices_points_best_candidate.at(i) - offset)
< m_direct_octrees[j]->size()) {
m_available_octree_sizes[j]--;
}
}
}
}
failed_candidates = 0;
best_expected = 0;
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
std::vector<std::size_t> subset_sizes(m_num_subsets);
subset_sizes[0] = m_available_octree_sizes[0];
for (std::size_t i = 1;i<m_num_subsets;i++) {
subset_sizes[i] = subset_sizes[i-1] + m_available_octree_sizes[i];
}
//3. Remove points from candidates common with extracted primitive
//#pragma omp parallel for
best_expected = 0;
for (std::size_t i=0;i< candidates.size()-1;i++) {
if (candidates[i]) {
candidates[i]->update_points(m_shape_index);
candidates[i]->compute_bound(
subset_sizes[candidates[i]->m_nb_subset_used - 1],
m_num_available_points - num_invalid);
if (candidates[i]->max_bound() < m_options.min_points) {
delete candidates[i];
candidates[i] = nullptr;
}
else {
best_expected = (candidates[i]->expected_value() > best_expected) ?
candidates[i]->expected_value() : best_expected;
}
}
}
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
std::size_t start = 0, end = candidates.size() - 1;
while (start < end) {
while (candidates[start] && start < end) start++;
while (!candidates[end] && start < end) end--;
if (!candidates[start] && candidates[end] && start < end) {
candidates[start] = candidates[end];
candidates[end] = nullptr;
start++;
end--;
}
}
if (candidates[end]) end++;
candidates.resize(end);
}
else if (!keep_searching)
++ generated_candidates;
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
keep_searching = (stop_probability(m_options.min_points,
m_num_available_points - num_invalid,
generated_candidates,
m_global_octree->maxLevel())
> m_options.probability);
}
while((keep_searching
&& FT(m_num_available_points - num_invalid) >= m_options.min_points)
|| best_expected >= m_options.min_points);
// Clean up remaining candidates.
for (std::size_t i = 0;i<candidates.size();i++)
delete candidates[i];
candidates.resize(0);
m_num_available_points -= num_invalid;
return true;
}
/// @}
/// \name Access
/// @{
/*!
Returns an `Iterator_range` with a bidirectional iterator with value type
`boost::shared_ptr<Shape>` over the detected shapes in the order of detection.
Depending on the chosen probability
for the detection, the shapes are ordered with decreasing size.
*/
Shape_range shapes() const {
return Shape_range(m_extracted_shapes);
}
/*!
Returns an `Iterator_range` with a bidirectional iterator with
value type `boost::shared_ptr<Plane_shape>` over only the
detected planes in the order of detection. Depending on the
chosen probability for the detection, the planes are ordered
with decreasing size.
*/
Plane_range planes() const {
boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > > planes
= boost::make_shared<std::vector<boost::shared_ptr<Plane_shape> > >();
for (std::size_t i = 0; i < m_extracted_shapes->size(); ++ i)
{
boost::shared_ptr<Plane_shape> pshape
= boost::dynamic_pointer_cast<Plane_shape>((*m_extracted_shapes)[i]);
// Ignore all shapes other than plane
if (pshape != boost::shared_ptr<Plane_shape>())
planes->push_back (pshape);
}
return Plane_range(planes);
}
/*!
Number of points not assigned to a shape.
*/
std::size_t number_of_unassigned_points() const {
return m_num_available_points;
}
/*!
Returns an `Iterator_range` with a bidirectional iterator with value type `std::size_t`
as indices into the input data that has not been assigned to a shape.
*/
Point_index_range indices_of_unassigned_points() {
Filter_unassigned_points fup(m_shape_index);
Point_index_iterator p1 =
boost::make_filter_iterator<Filter_unassigned_points>(
fup,
boost::counting_iterator<std::size_t, boost::use_default, std::ptrdiff_t>(0),
boost::counting_iterator<std::size_t, boost::use_default, std::ptrdiff_t>(m_shape_index.size()));
return make_range(p1, Point_index_iterator(p1.end()));
}
/// @}
private:
int select_random_octree_level() {
return (int) get_default_random()(
static_cast<unsigned int>(m_global_octree->maxLevel() + 1));
}
Shape* get_best_candidate(std::vector<Shape* >& candidates,
const std::size_t num_available_points) {
if (candidates.size() == 1)
return candidates.back();
int index_worse_candidate = 0;
bool improved = true;
while (index_worse_candidate < (int)candidates.size() - 1 && improved) {
improved = false;
typename Shape::Compare_by_max_bound comp;
std::sort(candidates.begin() + index_worse_candidate,
candidates.end(),
comp);
//refine the best one
improve_bound(candidates.back(),
num_available_points, m_num_subsets,
m_options.min_points);
int position_stop;
//Take all those intersecting the best one, check for equal ones
for (position_stop = int(candidates.size()) - 1;
position_stop > index_worse_candidate;
position_stop--) {
if (candidates.back()->min_bound() >
candidates.at(position_stop)->max_bound())
break;//the intervals do not overlaps anymore
if (candidates.at(position_stop)->max_bound()
<= m_options.min_points)
break; //the following candidate doesn't have enough points!
//if we reach this point, there is an overlap
// between best one and position_stop
//so request refining bound on position_stop
improved |= improve_bound(candidates.at(position_stop),
num_available_points,
m_num_subsets,
m_options.min_points);
//test again after refined
if (candidates.back()->min_bound() >
candidates.at(position_stop)->max_bound())
break;//the intervals do not overlaps anymore
}
index_worse_candidate = position_stop;
}
return candidates.back();
}
bool improve_bound(Shape *candidate,
std::size_t num_available_points,
std::size_t max_subset,
std::size_t min_points) {
if (candidate->m_nb_subset_used >= max_subset)
return false;
if (candidate->m_nb_subset_used >= m_num_subsets)
return false;
candidate->m_nb_subset_used =
(candidate->m_nb_subset_used >= m_num_subsets) ?
m_num_subsets - 1 : candidate->m_nb_subset_used;
//what it does is add another subset and recompute lower and upper bound
//the next subset to include is provided by m_nb_subset_used
std::size_t num_points_evaluated = 0;
for (std::size_t i=0;i<candidate->m_nb_subset_used;i++)
num_points_evaluated += m_available_octree_sizes[i];
// need score of new subset as well as sum of
// the score of the previous considered subset
std::size_t new_score = 0;
std::size_t new_sampled_points = 0;
do {
new_score = m_direct_octrees[candidate->m_nb_subset_used]->score(
candidate,
m_shape_index,
m_options.epsilon,
m_options.normal_threshold);
candidate->m_score += new_score;
num_points_evaluated +=
m_available_octree_sizes[candidate->m_nb_subset_used];
new_sampled_points +=
m_available_octree_sizes[candidate->m_nb_subset_used];
candidate->m_nb_subset_used++;
} while (new_sampled_points < min_points &&
candidate->m_nb_subset_used < m_num_subsets);
candidate->m_score = candidate->m_indices.size();
candidate->compute_bound(num_points_evaluated, num_available_points);
return true;
}
inline FT stop_probability(std::size_t largest_candidate, std::size_t num_pts, std::size_t num_candidates, std::size_t octree_depth) const {
return (std::min<FT>)(std::pow((FT) 1.f - (FT) largest_candidate / FT(num_pts * octree_depth * 4), (int) num_candidates), (FT) 1);
}
private:
Parameters m_options;
// Traits class.
Traits m_traits;
// Octrees build on input data for quick shape evaluation and
// sample selection within an octree cell.
Direct_octree **m_direct_octrees;
Indexed_octree *m_global_octree;
std::vector<std::size_t> m_available_octree_sizes;
std::size_t m_num_subsets;
// maps index into points to assigned extracted primitive
std::vector<int> m_shape_index;
std::size_t m_num_available_points;
std::size_t m_num_total_points;
//give the index of the subset of point i
std::vector<int> m_index_subsets;
boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > > m_extracted_shapes;
std::vector<Shape *(*)()> m_shape_factories;
// iterators of input data
bool m_valid_iterators;
Input_iterator m_input_iterator_first, m_input_iterator_beyond;
Point_map m_point_pmap;
Normal_map m_normal_pmap;
};
}
}
#endif // CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H
|
pr86025.c | /* PR c++/86025 */
/* { dg-do compile } */
/* { dg-additional-options "-Wduplicated-branches" } */
int i;
void
foo (int x)
{
if (x)
{
#pragma omp critical (foo)
i++;
}
else
{
#pragma omp critical
i++;
}
}
|
encrypt.c | #include "encrypt.h"
#include "files.h"
#include "mmap.h"
#include "error.h"
#include <stdlib.h>
#include <inttypes.h>
#include <limits.h>
#include <math.h>
#include <omp.h>
#include <stdio.h>
#define INT_LEN sizeof(int) //int length in bytes
#define PAR_BLCK (256 * 1024) //dimension, in bytes, of a thread unit of execution
#ifdef _WIN32
static int rand_r(unsigned int *seed);
#endif
static int create_cipher_file(const char *name, fsize_t size, File *out);
/*
 * Encrypts 'plainfd' into a new file named 'out_name', XORing the plaintext
 * with a rand_r() key stream derived from 'key_seed'. The file is split into
 * PAR_BLCK-sized chunks, each with its own pre-generated seed, so chunks can
 * be encrypted independently in parallel.
 * @return 0 on success, -1 on failure (the output file is deleted on error).
 */
int encrypt(File plainfd, const char *out_name, unsigned int key_seed) {
    if(out_name == NULL) return -1;
    fsize_t size;
    if(fget_file_size(plainfd, &size)) {
        perr("Error getting plaintext file size");
        return -1;
    }
    //map the plaintext and ciphertext file
    MemoryMap *plain = memory_map(plainfd, size, MMAP_READ, MMAP_PRIVATE);
    if(!plain) {
        perr("Error mapping the file for encryption");
        return -1;
    }
    //create and mmap the ciphertext file
    File cipherfd;
    if(create_cipher_file(out_name, size, &cipherfd)) {
        perr("Error mapping the cipher file");
        memory_unmap(plain);
        return -1;
    }
    MemoryMap *cipher = memory_map(cipherfd, size, MMAP_READ | MMAP_WRITE, MMAP_SHARED);
    if(!cipher) {
        perr("Error mapping the cipher file");
        memory_unmap(plain);
        close_file(cipherfd);
        delete_file(out_name); //delete the cipher file on error
        return -1;
    }
    //the number of 256Kb chunks in the file. Exact integer ceiling: the
    //previous ceil(size/(float)PAR_BLCK) lost precision for files larger
    //than ~16MB (24-bit float mantissa) and could mis-count the chunks.
    size_t num_chunks = (size_t)(size / PAR_BLCK) + (size % PAR_BLCK != 0 ? 1 : 0);
    //generates a new seed for every thread (1 per chunk) using the supplied seed
    unsigned int *seeds = malloc(sizeof(unsigned int) * num_chunks);
    if(num_chunks > 0 && seeds == NULL) { //previously unchecked allocation
        perr("Error allocating chunk seeds");
        memory_unmap(plain);
        memory_unmap(cipher);
        close_file(cipherfd);
        delete_file(out_name);
        return -1;
    }
    for(size_t i = 0; i < num_chunks; i++) {
        seeds[i] = rand_r(&key_seed);
    }
    //finally encrypt the file
    int result = 0;
#pragma omp parallel for
    for(size_t n = 0; n < num_chunks; n++) {
        fsize_t from = n * PAR_BLCK;
        fsize_t len = (from + PAR_BLCK) > size ? size - from : PAR_BLCK;
        //map views of the size of the chunk
        int *plain_chunk = mmap_mapview(plain, from, len);
        int *cipher_chunk = mmap_mapview(cipher, from, len);
        if(!cipher_chunk || !plain_chunk) {
            perr("Error getting mapped file view");
            //release whichever view was obtained and skip this chunk:
            //the old code fell through and dereferenced the NULL pointer(s)
            if(plain_chunk) mmap_unmapview(plain_chunk);
            if(cipher_chunk) mmap_unmapview(cipher_chunk);
#pragma omp atomic write
            result = -1; //error
            continue;
        }
        //encrypt the bytes of the chunk in 4 bytes groups
        //(plain integer division replaces floor(len/(float)INT_LEN))
        int len_int = (int)(len / INT_LEN);
        for(int i = 0; i < len_int; i++) {
            cipher_chunk[i] = plain_chunk[i] ^ rand_r(&seeds[n]);
        }
        //if the file is not a multiple of 4 then encrypt the last bytes 1 at a time
        int remainder;
        if((remainder = len % INT_LEN) != 0) {
            int k = rand_r(&seeds[n]);
            for(int i = len - remainder; i < len; i++) {
                ((char *) cipher_chunk)[i] = ((char *) plain_chunk)[i] ^ ((char *) &k)[remainder - (len - i)];
            }
        }
        mmap_unmapview(plain_chunk);
        mmap_unmapview(cipher_chunk);
    }
    memory_unmap(plain);
    memory_unmap(cipher);
    free(seeds);
    unlock_file(cipherfd, 0, size);
    close_file(cipherfd);
    if(result == -1)
        delete_file(out_name); //don't leave a partially encrypted file behind
    return result;
}
/*
* Creates the cipher file and locks it over size 'size'. The size should be the same as the plaintext file
* @return 0 on success, non 0 on failure.
*/
/*
 * Creates the ciphertext file 'name' and locks the byte range [0, size).
 * The size should be the same as the plaintext file.
 * @return 0 on success, non 0 on failure.
 */
static int create_cipher_file(const char *name, fsize_t size, File *out) {
    int open_error = 0;
    *out = open_file(name, READ | WRITE | CREATE, &open_error);
    if(open_error != 0) {
        perr("Error creating the ciphertext file");
        return -1;
    }
    if(lock_file(*out, 0, size) != 0) {
        perr("Error locking the ciphertext file");
        close_file(*out); //don't leak the handle on failure
        return -1;
    }
    return 0;
}
//Mingw-w64 does not seem to have rand_r implemented. The following implementation is
//taken from the Mingw source code on sourceforge
#ifdef _WIN32
/**Thread safe random number generator.*/
/** Thread safe random number generator (Park-Miller "minimal standard" LCG);
 *  updates *seed in place and returns the next value masked to RAND_MAX. */
static int rand_r(unsigned int *seed) {
    long state = (long)(*seed);
    if (state == 0)
        state = 0x12345987; //a zero seed would get stuck at zero
    const long quotient = state / 127773;
    const long remainder = state - quotient * 127773;
    state = 16807 * remainder - 2836 * quotient;
    if (state < 0)
        state += 2147483647;
    (*seed) = (unsigned int)state;
    return (int)(state & RAND_MAX);
}
#endif
|
pngquant.c | /* pngquant.c - quantize the colors in an alphamap down to a specified number
**
** © 2009-2019 by Kornel Lesiński.
** © 1989, 1991 by Jef Poskanzer.
** © 1997-2002 by Greg Roelofs; based on an idea by Stefan Schneider.
**
** See COPYRIGHT file for license.
*/
/* Command-line help text printed by print_usage(); kept as one preformatted
   string so the layout matches the man page. */
char *PNGQUANT_USAGE = "\
usage: pngquant [options] [ncolors] -- pngfile [pngfile ...]\n\
pngquant [options] [ncolors] - >stdout <stdin\n\n\
options:\n\
--force overwrite existing output files (synonym: -f)\n\
--skip-if-larger only save converted files if they're smaller than original\n\
--output file destination file path to use instead of --ext (synonym: -o)\n\
--ext new.png set custom suffix/extension for output filenames\n\
--quality min-max don't save below min, use fewer colors below max (0-100)\n\
--speed N speed/quality trade-off. 1=slow, 4=default, 11=fast & rough\n\
--nofs disable Floyd-Steinberg dithering\n\
--posterize N output lower-precision color (e.g. for ARGB4444 output)\n\
--strip remove optional metadata (default on Mac)\n\
--verbose print status messages (synonym: -v)\n\
\n\
Quantizes one or more 32-bit RGBA PNGs to 8-bit (or smaller) RGBA-palette.\n\
The output filename is the same as the input name except that\n\
it ends in \"-fs8.png\", \"-or8.png\" or your custom extension (unless the\n\
input is stdin, in which case the quantized image will go to stdout).\n\
If you pass the special output path \"-\" and a single input file, that file\n\
will be processed and the quantized image will go to stdout.\n\
The default behavior if the output file exists is to skip the conversion;\n\
use --force to overwrite. See man page for full list of options.\n";
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <stdbool.h>
#include <math.h>
#if defined(_WIN32) || defined(WIN32) || defined(__WIN32__)
# include <fcntl.h> /* O_BINARY */
# include <io.h> /* setmode() */
#else
# include <unistd.h>
#endif
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_max_threads() 1
#define omp_get_thread_num() 0
#endif
#include "rwpng.h" /* typedefs, common macros, public prototypes */
#include "libimagequant.h" /* if it fails here, run: git submodule update; ./configure; or add -Ilib to compiler flags */
#include "pngquant_opts.h"
char *PNGQUANT_VERSION = LIQ_VERSION_STRING " (July 2019)";
static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, rwpng_color_transform tag, png8_image *output_image);
static void set_palette(liq_result *result, png8_image *output_image);
static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool strip, bool verbose);
static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options, liq_attr *liq);
static char *add_filename_extension(const char *filename, const char *newext);
static bool file_exists(const char *outname);
/*
 * printf-style status logging: formats the message and hands it to the
 * configured log callback; no-op when verbose logging is disabled.
 * Measures the needed size with a first vsnprintf pass, then formats into
 * a buffer of exactly that size.
 */
static void verbose_printf(liq_attr *liq, struct pngquant_options *context, const char *fmt, ...)
{
    if (context->log_callback) {
        va_list va;
        va_start(va, fmt);
        int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +\0
        va_end(va);

        // vsnprintf returns a negative value on encoding errors; previously
        // that produced a zero-sized VLA, which is undefined behavior.
        if (required_space <= 0) {
            return;
        }

#if defined(_MSC_VER)
        char *buf = malloc(required_space);
        if (!buf) return; // allocation was previously unchecked
#else
        char buf[required_space];
#endif

        va_start(va, fmt);
        vsnprintf(buf, required_space, fmt, va);
        va_end(va);

        context->log_callback(liq, buf, context->log_callback_user_info);
#if defined(_MSC_VER)
        free(buf);
#endif
    }
}
// Default verbose-mode sink: writes each libimagequant message to stderr,
// one per line. The unused parameters are required by the callback signature.
static void log_callback(const liq_attr *attr, const char *msg, void* user_info)
{
    fputs(msg, stderr);
    fputc('\n', stderr);
}
#ifdef _OPENMP
#define LOG_BUFFER_SIZE 1300
// Per-thread buffer that batches log lines so parallel workers don't
// interleave partial messages on stderr.
struct buffered_log {
    int buf_used; // bytes currently used in buf
    char buf[LOG_BUFFER_SIZE];
};
// Writes any buffered log text to stderr and empties the buffer.
static void log_callback_buferred_flush(const liq_attr *attr, void *context)
{
    struct buffered_log *log = context;
    if (log->buf_used == 0) {
        return;
    }
    fwrite(log->buf, 1, log->buf_used, stderr);
    fflush(stderr);
    log->buf_used = 0;
}
// Appends one message (newline-terminated) to the per-thread log buffer,
// flushing first when the buffer can't hold it.
static void log_callback_buferred(const liq_attr *attr, const char *msg, void* context)
{
    struct buffered_log *log = context;
    int len = strlen(msg);
    if (len > LOG_BUFFER_SIZE-2) len = LOG_BUFFER_SIZE-2; // truncate: must leave room for '\n' and '\0'
    if (len > LOG_BUFFER_SIZE - log->buf_used - 2) log_callback_buferred_flush(attr, log); // not enough space left
    memcpy(&log->buf[log->buf_used], msg, len);
    log->buf_used += len+1; // +1 accounts for the '\n' written below
    log->buf[log->buf_used-1] = '\n';
    log->buf[log->buf_used] = '\0'; // keep buffer NUL-terminated at all times
}
#endif
// Writes build-configuration notes (debug build, SSE disabled, OpenMP) to fd.
// The message is a single string literal assembled at compile time from the
// #if blocks below; when none applies, nothing is printed.
void pngquant_internal_print_config(FILE *fd) {
    fputs(""
#ifndef NDEBUG
        " WARNING: this is a DEBUG (slow) version.\n" /* NDEBUG disables assert() */
#endif
#if !USE_SSE && (defined(__SSE__) || defined(__amd64__) || defined(__X86_64__) || defined(__i386__))
        " SSE acceleration disabled.\n"
#endif
#if _OPENMP
        " Compiled with OpenMP (multicore support).\n"
#endif
    , fd);
    fflush(fd);
}
// Accessors for the C standard streams, for callers that cannot reference
// the stderr/stdout macros directly (presumably foreign-language bindings;
// confirm against the callers).
FILE *pngquant_c_stderr() {
    return stderr;
}
FILE *pngquant_c_stdout() {
    return stdout;
}
// Prints the full version banner: program version, build configuration
// and the PNG library version info, followed by a blank line.
static void print_full_version(FILE *fd)
{
    fprintf(fd, "pngquant, %s, by Kornel Lesinski, Greg Roelofs.\n", PNGQUANT_VERSION);
    pngquant_internal_print_config(fd);
    rwpng_version_info(fd);
    fputc('\n', fd);
}
// Prints the command-line help text to the given stream.
static void print_usage(FILE *fd)
{
    fprintf(fd, "%s", PNGQUANT_USAGE);
}
/**
* N = automatic quality, uses limit unless force is set (N-N or 0-N)
* -N = no better than N (same as 0-N)
* N-M = no worse than N, no better than M
* N- = no worse than N, perfect if possible (same as N-100)
*
* where N,M are numbers between 0 (lousy) and 100 (perfect)
*/
// Parses the --quality argument (formats documented above) into a
// (limit, target) pair and applies it via liq_set_quality().
// Sets *min_quality_limit when a non-zero lower bound was requested.
// Returns false on malformed input or if libimagequant rejects the range.
static bool parse_quality(const char *quality, liq_attr *options, bool *min_quality_limit)
{
    long limit, target;
    const char *str = quality; char *end;

    long t1 = strtol(str, &end, 10);
    if (str == end) return false; // no leading number at all
    str = end;

    if ('\0' == end[0] && t1 < 0) { // quality="-%d"
        target = -t1;
        limit = 0;
    } else if ('\0' == end[0]) { // quality="%d"
        target = t1;
        limit = t1*9/10;
    } else if ('-' == end[0] && '\0' == end[1]) { // quality="%d-"
        target = 100;
        limit = t1;
    } else { // quality="%d-%d"
        // str still points at the '-', so strtol parses "-M" and yields a
        // negative number; a non-negative t2 means the separator was missing
        long t2 = strtol(str, &end, 10);
        if (str == end || t2 > 0) return false;
        target = -t2;
        limit = t1;
    }

    *min_quality_limit = (limit > 0);
    return LIQ_OK == liq_set_quality(options, limit, target);
}
pngquant_error pngquant_main_internal(struct pngquant_options *options, liq_attr *liq);
static pngquant_error pngquant_file_internal(const char *filename, const char *outname, struct pngquant_options *options, liq_attr *liq);
#ifndef PNGQUANT_NO_MAIN
// Entry point: parses options, validates them, configures the shared
// libimagequant attr, then runs the conversion over all input files.
// Returns a pngquant_error code (SUCCESS == 0) as the process exit status.
int main(int argc, char *argv[])
{
    struct pngquant_options options = {
        .floyd = 1.f, // floyd-steinberg dithering
        .strip = false,
    };

    pngquant_error retval = pngquant_parse_options(argc, argv, &options);
    if (retval != SUCCESS) {
        return retval;
    }

    // Informational modes that exit before doing any work
    if (options.print_version) {
        puts(PNGQUANT_VERSION);
        return SUCCESS;
    }

    if (options.missing_arguments) {
        print_full_version(stderr);
        print_usage(stderr);
        return MISSING_ARGUMENT;
    }

    if (options.print_help) {
        print_full_version(stdout);
        print_usage(stdout);
        return SUCCESS;
    }

    liq_attr *liq = liq_attr_create();

    if (!liq) {
        fputs("SSE-capable CPU is required for this build.\n", stderr);
        return WRONG_ARCHITECTURE;
    }

    // Translate validated option values into libimagequant settings
    if (options.quality && !parse_quality(options.quality, liq, &options.min_quality_limit)) {
        fputs("Quality should be in format min-max where min and max are numbers in range 0-100.\n", stderr);
        return INVALID_ARGUMENT;
    }

    if (options.iebug) {
        // opacities above 238 will be rounded up to 255, because IE6 truncates <255 to 0.
        liq_set_min_opacity(liq, 238);
        fputs(" warning: the workaround for IE6 is deprecated\n", stderr);
    }

    if (options.verbose) {
        liq_set_log_callback(liq, log_callback, NULL);
        options.log_callback = log_callback;
    }

    if (options.last_index_transparent) {
        liq_set_last_index_transparent(liq, true);
    }

    // Speeds 10-11 additionally enable fast compression; 11 also drops dithering
    if (options.speed >= 10) {
        options.fast_compression = true;
        if (options.speed == 11) {
            options.floyd = 0;
            options.speed = 10;
        }
    }

    if (options.speed && LIQ_OK != liq_set_speed(liq, options.speed)) {
        fputs("Speed should be between 1 (slow) and 11 (fast).\n", stderr);
        return INVALID_ARGUMENT;
    }

    if (options.colors && LIQ_OK != liq_set_max_colors(liq, options.colors)) {
        fputs("Number of colors must be between 2 and 256.\n", stderr);
        return INVALID_ARGUMENT;
    }

    if (options.posterize && LIQ_OK != liq_set_min_posterization(liq, options.posterize)) {
        fputs("Posterization should be number of bits in range 0-4.\n", stderr);
        return INVALID_ARGUMENT;
    }

    if (options.extension && options.output_file_path) {
        fputs("--ext and --output options can't be used at the same time\n", stderr);
        return INVALID_ARGUMENT;
    }

    // new filename extension depends on options used. Typically basename-fs8.png
    if (options.extension == NULL) {
        options.extension = options.floyd > 0 ? "-fs8.png" : "-or8.png";
    }

    if (options.output_file_path && options.num_files != 1) {
        fputs(" error: Only one input file is allowed when --output is used. This error also happens when filenames with spaces are not in quotes.\n", stderr);
        return INVALID_ARGUMENT;
    }

    if (options.using_stdout && !options.using_stdin && options.num_files != 1) {
        fputs(" error: Only one input file is allowed when using the special output path \"-\" to write to stdout. This error also happens when filenames with spaces are not in quotes.\n", stderr);
        return INVALID_ARGUMENT;
    }

    if (!options.num_files && !options.using_stdin) {
        fputs("No input files specified.\n", stderr);
        if (options.verbose) {
            print_full_version(stderr);
        }
        print_usage(stderr);
        return MISSING_ARGUMENT;
    }

    retval = pngquant_main_internal(&options, liq);
    liq_attr_destroy(liq);
    return retval;
}
#endif
// Don't use this. This is not a public API.
// Runs the conversion over all input files, optionally in parallel via
// OpenMP. Loads the fixed palette image first (if --map was given).
// Returns the most recent per-file error, or SUCCESS if all files passed.
pngquant_error pngquant_main_internal(struct pngquant_options *options, liq_attr *liq)
{
    if (options->map_file) {
        // Quantize the map image once and pin its colors as a fixed palette
        png24_image tmp = {.width=0};
        if (SUCCESS != read_image(liq, options->map_file, false, &tmp, &options->fixed_palette_image, true, true, false)) {
            fprintf(stderr, " error: unable to load %s", options->map_file);
            return INVALID_ARGUMENT;
        }
        liq_result *tmp_quantize = liq_quantize_image(liq, options->fixed_palette_image);
        const liq_palette *pal = liq_get_palette(tmp_quantize);
        if (!pal) {
            fprintf(stderr, " error: unable to read colors from %s", options->map_file);
            return INVALID_ARGUMENT;
        }
        for(unsigned int i=0; i < pal->count; i++) {
            liq_image_add_fixed_color(options->fixed_palette_image, pal->entries[i]);
        }
        liq_result_destroy(tmp_quantize);
    }

#ifdef _OPENMP
    // if there's a lot of files, coarse parallelism can be used
    if (options->num_files > 2*omp_get_max_threads()) {
        omp_set_nested(0);
        omp_set_dynamic(1);
    } else {
        omp_set_nested(1);
    }
#endif

    unsigned int error_count=0, skipped_count=0, file_count=0;
    pngquant_error latest_error=SUCCESS;

    // One iteration per input file; counters are OpenMP reductions so each
    // thread accumulates privately and the totals are combined at the end.
    #pragma omp parallel for \
        schedule(static, 1) reduction(+:skipped_count) reduction(+:error_count) reduction(+:file_count) shared(latest_error)
    for(int i=0; i < options->num_files; i++) {
        const char *filename = options->using_stdin ? "stdin" : options->files[i];
        struct pngquant_options opts = *options; // per-file copy; threads may modify it

        liq_attr *local_liq = liq_attr_copy(liq);

#ifdef _OPENMP
        // When several worker threads log at once, buffer each thread's
        // messages so lines aren't interleaved on stderr.
        struct buffered_log buf = {0};
        if (opts.log_callback && omp_get_num_threads() > 1 && opts.num_files > 1) {
            liq_set_log_callback(local_liq, log_callback_buferred, &buf);
            liq_set_log_flush_callback(local_liq, log_callback_buferred_flush, &buf);
            opts.log_callback = log_callback_buferred;
            opts.log_callback_user_info = &buf;
        }
#endif

        pngquant_error retval = SUCCESS;

        const char *outname = opts.output_file_path;
        char *outname_free = NULL;
        if (!opts.using_stdout) {
            if (!outname) {
                outname = outname_free = add_filename_extension(filename, opts.extension);
            }
            if (!opts.force && file_exists(outname)) {
                fprintf(stderr, " error: '%s' exists; not overwriting\n", outname);
                retval = NOT_OVERWRITING_ERROR;
            }
        }

        if (SUCCESS == retval) {
            retval = pngquant_file_internal(filename, outname, &opts, local_liq);
        }

        free(outname_free);

        liq_attr_destroy(local_liq);

        if (retval) {
            // latest_error is shared; the critical section keeps the store atomic
            #pragma omp critical
            {
                latest_error = retval;
            }
            if (retval == TOO_LOW_QUALITY || retval == TOO_LARGE_FILE) {
                skipped_count++;
            } else {
                error_count++;
            }
        }
        ++file_count;
    }

    if (error_count) {
        verbose_printf(liq, options, "There were errors quantizing %d file%s out of a total of %d file%s.",
            error_count, (error_count == 1)? "" : "s", file_count, (file_count == 1)? "" : "s");
    }
    if (skipped_count) {
        verbose_printf(liq, options, "Skipped %d file%s out of a total of %d file%s.",
            skipped_count, (skipped_count == 1)? "" : "s", file_count, (file_count == 1)? "" : "s");
    }
    if (!skipped_count && !error_count) {
        verbose_printf(liq, options, "Quantized %d image%s.",
            file_count, (file_count == 1)? "" : "s");
    }

    if (options->fixed_palette_image) liq_image_destroy(options->fixed_palette_image);

    return latest_error;
}
/// Don't hack this. Instead use https://github.com/ImageOptim/libimagequant/blob/f54d2f1a3e1cf728e17326f4db0d45811c63f063/example.c
// Converts a single file: read -> quantize -> remap -> write, with
// verbose progress reporting. Never modifies the input file; the output
// goes to 'outname' or stdout depending on options.
static pngquant_error pngquant_file_internal(const char *filename, const char *outname, struct pngquant_options *options, liq_attr *liq)
{
    pngquant_error retval = SUCCESS;

    verbose_printf(liq, options, "%s:", filename);

    liq_image *input_image = NULL;
    png24_image input_image_rwpng = {.width=0};
    bool keep_input_pixels = options->skip_if_larger || (options->using_stdout && options->min_quality_limit); // original may need to be output to stdout
    if (SUCCESS == retval) {
        retval = read_image(liq, filename, options->using_stdin, &input_image_rwpng, &input_image, keep_input_pixels, options->strip, options->verbose);
    }

    int quality_percent = 90; // quality on 0-100 scale, updated upon successful remap
    png8_image output_image = {.width=0};
    if (SUCCESS == retval) {
        verbose_printf(liq, options, " read %luKB file", (input_image_rwpng.file_size+1023UL)/1024UL);

        // Report which color-management path the reader took
        if (RWPNG_ICCP == input_image_rwpng.input_color) {
            verbose_printf(liq, options, " used embedded ICC profile to transform image to sRGB colorspace");
        } else if (RWPNG_GAMA_CHRM == input_image_rwpng.input_color) {
            verbose_printf(liq, options, " used gAMA and cHRM chunks to transform image to sRGB colorspace");
        } else if (RWPNG_ICCP_WARN_GRAY == input_image_rwpng.input_color) {
            verbose_printf(liq, options, " warning: ignored ICC profile in GRAY colorspace");
        } else if (RWPNG_COCOA == input_image_rwpng.input_color) {
            // No comment
        } else if (RWPNG_SRGB == input_image_rwpng.input_color) {
            verbose_printf(liq, options, " passing sRGB tag from the input");
        } else if (input_image_rwpng.gamma != 0.45455) {
            verbose_printf(liq, options, " converted image from gamma %2.1f to gamma 2.2",
                1.0/input_image_rwpng.gamma);
        }

        // when using image as source of a fixed palette the palette is extracted using regular quantization
        liq_result *remap;
        liq_error remap_error = liq_image_quantize(options->fixed_palette_image ? options->fixed_palette_image : input_image, liq, &remap);

        if (LIQ_OK == remap_error) {
            // fixed gamma ~2.2 for the web. PNG can't store exact 1/2.2
            // NB: can't change gamma here, because output_color is allowed to be an sRGB tag
            liq_set_output_gamma(remap, 0.45455);
            liq_set_dithering_level(remap, options->floyd);

            retval = prepare_output_image(remap, input_image, input_image_rwpng.output_color, &output_image);
            if (SUCCESS == retval) {
                if (LIQ_OK != liq_write_remapped_image_rows(remap, input_image, output_image.row_pointers)) {
                    retval = OUT_OF_MEMORY_ERROR;
                }

                set_palette(remap, &output_image);

                double palette_error = liq_get_quantization_error(remap);
                if (palette_error >= 0) {
                    quality_percent = liq_get_quantization_quality(remap);
                    verbose_printf(liq, options, " mapped image to new colors...MSE=%.3f (Q=%d)", palette_error, quality_percent);
                }
            }
            liq_result_destroy(remap);
        } else if (LIQ_QUALITY_TOO_LOW == remap_error) {
            retval = TOO_LOW_QUALITY;
        } else {
            retval = INVALID_ARGUMENT; // dunno
        }
    }

    if (SUCCESS == retval) {
        if (options->skip_if_larger) {
            // this is very rough approximation, but generally avoid losing more quality than is gained in file size.
            // Quality is raised to 1.5, because even greater savings are needed to justify big quality loss.
            // but >50% savings are considered always worthwile in order to allow low quality conversions to work at all
            const double quality = quality_percent/100.0;
            const double expected_reduced_size = pow(quality, 1.5);
            output_image.maximum_file_size = (input_image_rwpng.file_size-1) * (expected_reduced_size < 0.5 ? 0.5 : expected_reduced_size);
        }

        output_image.fast_compression = options->fast_compression;
        // transfer ownership of the metadata chunks to the output image
        output_image.chunks = input_image_rwpng.chunks; input_image_rwpng.chunks = NULL;
        retval = write_image(&output_image, NULL, outname, options, liq);
        if (TOO_LARGE_FILE == retval) {
            verbose_printf(liq, options, " file exceeded expected size of %luKB", (unsigned long)output_image.maximum_file_size/1024UL);
        }
        if (SUCCESS == retval && output_image.metadata_size > 0) {
            verbose_printf(liq, options, " copied %dKB of additional PNG metadata", (int)(output_image.metadata_size+999)/1000);
        }
    }

    if (options->using_stdout && keep_input_pixels && (TOO_LARGE_FILE == retval || TOO_LOW_QUALITY == retval)) {
        // when outputting to stdout it'd be nasty to create 0-byte file
        // so if quality is too low, output 24-bit original
        pngquant_error write_retval = write_image(NULL, &input_image_rwpng, outname, options, liq);
        if (write_retval) {
            retval = write_retval;
        }
    }

    if (input_image) liq_image_destroy(input_image);
    rwpng_free_image24(&input_image_rwpng);
    rwpng_free_image8(&output_image);

    return retval;
}
// Copies the quantized palette from the libimagequant result into the
// PNG writer's palette array.
static void set_palette(liq_result *result, png8_image *output_image)
{
    const liq_palette *pal = liq_get_palette(result);

    output_image->num_palette = pal->count;
    for(unsigned int idx = 0; idx < pal->count; idx++) {
        const liq_color entry = pal->entries[idx];
        output_image->palette[idx] = (rwpng_rgba){.r=entry.r, .g=entry.g, .b=entry.b, .a=entry.a};
    }
}
// Returns true when 'outname' can be opened for reading (i.e. it exists
// and is accessible).
static bool file_exists(const char *outname)
{
    FILE *f = fopen(outname, "rb");
    if (f == NULL) {
        return false;
    }
    fclose(f);
    return true;
}
/* build the output filename from the input name by inserting "-fs8" or
* "-or8" before the ".png" extension (or by appending that plus ".png" if
* there isn't any extension), then make sure it doesn't exist already */
/* Builds the output filename: replaces a trailing ".png"/".PNG" with
 * 'newext', or appends 'newext' when there is no such extension.
 * Returns a malloc'd string the caller must free, or NULL on OOM. */
static char *add_filename_extension(const char *filename, const char *newext)
{
    const size_t len = strlen(filename);

    char* outname = malloc(len+4+strlen(newext)+1);
    if (!outname) return NULL;

    memcpy(outname, filename, len+1);

    size_t cut_at = len; // position where newext is written
    if (len > 4 && (0 == strncmp(filename+len-4, ".png", 4) || 0 == strncmp(filename+len-4, ".PNG", 4))) {
        cut_at = len-4; // drop the old extension
    }
    strcpy(outname+cut_at, newext);

    return outname;
}
// Returns a malloc'd "<basename>.tmp" string (caller frees), or NULL on OOM.
static char *temp_filename(const char *basename) {
    const size_t len = strlen(basename);
    char *name = malloc(len + sizeof(".tmp"));
    if (name == NULL) return NULL;
    memcpy(name, basename, len);
    memcpy(name + len, ".tmp", sizeof(".tmp")); // includes the '\0'
    return name;
}
// Switches fp to binary mode on Windows so PNG bytes aren't mangled by
// CRLF text-mode translation; no-op on other platforms.
static void set_binary_mode(FILE *fp)
{
#if defined(_WIN32) || defined(WIN32) || defined(__WIN32__)
    // NOTE(review): passes hard-coded fd numbers (1 for stdout, 0 otherwise)
    // rather than _fileno(fp) — only correct when fp is stdout or stdin;
    // confirm no other stream is ever passed here.
    setmode(fp == stdout ? 1 : 0, O_BINARY);
#endif
}
// Returns a pointer to the basename component of 'path' (text after the
// last '/'), or the whole path when it contains no slash.
static const char *filename_part(const char *path)
{
    const char *last_slash = strrchr(path, '/');
    return last_slash ? last_slash + 1 : path;
}
// Moves 'from' over 'to' via rename(); returns true on success.
// On Windows rename() refuses to overwrite, so the target is unlinked
// first when 'force' is set.
static bool replace_file(const char *from, const char *to, const bool force) {
#if defined(_WIN32) || defined(WIN32) || defined(__WIN32__)
    if (force) {
        // On Windows rename doesn't replace
        unlink(to);
    }
#endif
    return rename(from, to) == 0;
}
// Writes either the 8-bit quantized image (output_image) or the 24-bit
// original (output_image24) — exactly one of them is non-NULL — to stdout
// or to 'outname'. File output goes through a ".tmp" file that is renamed
// over the destination on success, so a failed write can't corrupt an
// existing file.
static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options, liq_attr *liq)
{
    FILE *outfile;
    char *tempname = NULL;

    if (options->using_stdout) {
        set_binary_mode(stdout);
        outfile = stdout;

        if (output_image) {
            verbose_printf(liq, options, " writing %d-color image to stdout", output_image->num_palette);
        } else {
            verbose_printf(liq, options, " writing truecolor image to stdout");
        }
    } else {
        tempname = temp_filename(outname);
        if (!tempname) return OUT_OF_MEMORY_ERROR;

        if ((outfile = fopen(tempname, "wb")) == NULL) {
            fprintf(stderr, " error: cannot open '%s' for writing\n", tempname);
            free(tempname);
            return CANT_WRITE_ERROR;
        }

        if (output_image) {
            verbose_printf(liq, options, " writing %d-color image as %s", output_image->num_palette, filename_part(outname));
        } else {
            verbose_printf(liq, options, " writing truecolor image as %s", filename_part(outname));
        }
    }

    pngquant_error retval;
    // libpng is serialized across worker threads via this named critical section
    #pragma omp critical (libpng)
    {
        if (output_image) {
            retval = rwpng_write_image8(outfile, output_image);
        } else {
            retval = rwpng_write_image24(outfile, output_image24);
        }
    }

    if (!options->using_stdout) {
        fclose(outfile);

        if (SUCCESS == retval) {
            // Image has been written to a temporary file and then moved over destination.
            // This makes replacement atomic and avoids damaging destination file on write error.
            if (!replace_file(tempname, outname, options->force)) {
                retval = CANT_WRITE_ERROR;
            }
        }

        if (retval) {
            // clean up the temp file on any failure
            unlink(tempname);
        }
    }

    free(tempname);

    if (retval && retval != TOO_LARGE_FILE) {
        fprintf(stderr, " error: failed writing image to %s (%d)\n", options->using_stdout ? "stdout" : outname, retval);
    }

    return retval;
}
// Reads a PNG from 'filename' (or stdin) into *input_image_p and wraps it
// in a liq_image at *liq_image_p. When keep_input_pixels is false, pixel
// ownership is transferred to libimagequant and the rwpng pointers are
// cleared so they won't be double-freed.
static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool strip, bool verbose)
{
    FILE *infile;

    if (using_stdin) {
        set_binary_mode(stdin);
        infile = stdin;
    } else if ((infile = fopen(filename, "rb")) == NULL) {
        fprintf(stderr, " error: cannot open %s for reading\n", filename);
        return READ_ERROR;
    }

    pngquant_error retval;
    // libpng is serialized across worker threads via this named critical section
    #pragma omp critical (libpng)
    {
        retval = rwpng_read_image24(infile, input_image_p, strip, verbose);
    }

    if (!using_stdin) {
        fclose(infile);
    }

    if (retval) {
        fprintf(stderr, " error: cannot decode image %s\n", using_stdin ? "from stdin" : filename_part(filename));
        return retval;
    }

    *liq_image_p = liq_image_create_rgba_rows(options, (void**)input_image_p->row_pointers, input_image_p->width, input_image_p->height, input_image_p->gamma);

    if (!*liq_image_p) {
        return OUT_OF_MEMORY_ERROR;
    }

    if (!keep_input_pixels) {
        if (LIQ_OK != liq_image_set_memory_ownership(*liq_image_p, LIQ_OWN_ROWS | LIQ_OWN_PIXELS)) {
            return OUT_OF_MEMORY_ERROR;
        }
        // ownership moved to liq; prevent rwpng_free_image24 from freeing these
        input_image_p->row_pointers = NULL;
        input_image_p->rgba_data = NULL;
    }

    return SUCCESS;
}
/*
 * Allocates the 8-bit indexed output image and copies its basic properties
 * (dimensions, output gamma, color transform) from the quantization result.
 * @return SUCCESS, or OUT_OF_MEMORY_ERROR on allocation failure/overflow
 *         (partially allocated buffers are freed by rwpng_free_image8()).
 */
static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, rwpng_color_transform output_color, png8_image *output_image)
{
    output_image->width = liq_image_get_width(input_image);
    output_image->height = liq_image_get_height(input_image);
    output_image->gamma = liq_get_output_gamma(result);
    output_image->output_color = output_color;

    /*
    ** Step 3.7 [GRR]: allocate memory for the entire indexed image
    */

    // Reject dimensions whose product would wrap size_t: the previous code
    // passed a silently-truncated size to malloc and then wrote past it.
    if (output_image->width != 0 &&
        (size_t)output_image->height > (size_t)-1 / (size_t)output_image->width) {
        return OUT_OF_MEMORY_ERROR;
    }

    output_image->indexed_data = malloc((size_t)output_image->height * (size_t)output_image->width);
    output_image->row_pointers = malloc(output_image->height * sizeof(output_image->row_pointers[0]));

    if (!output_image->indexed_data || !output_image->row_pointers) {
        return OUT_OF_MEMORY_ERROR;
    }

    for(size_t row = 0; row < output_image->height; row++) {
        output_image->row_pointers[row] = output_image->indexed_data + row * output_image->width;
    }

    const liq_palette *palette = liq_get_palette(result);
    // tRNS, etc.
    output_image->num_palette = palette->count;

    return SUCCESS;
}
|
m_area_auto_covariance.h | //
// Created by Harold on 2021/6/28.
//
#ifndef M_MATH_M_AREA_AUTO_COVARIANCE_H
#define M_MATH_M_AREA_AUTO_COVARIANCE_H
#include <opencv2/core.hpp>
#include <omp.h>
namespace M_MATH {
// Computes the areal auto-covariance of surface I over all (ty, tx) lags:
// AACV(ty, tx) = (1/(nx*ny)) * sum I(j, i) * I(j+ty-1, i+tx-1).
// The -1 offset makes row 0 / column 0 of the raw result invalid; they are
// removed by the shift-copy at the end, and the freed last row/column are
// zeroed. Returns an image of the same size as I.
template<typename T>
cv::Mat_<T> AreaAutoCovariance(cv::Mat_<T> const& I) {
    static_assert(std::is_floating_point<T>::value, "T should be floating point");
    auto ny = I.rows;
    auto nx = I.cols;
    cv::Mat_<T> AACV(ny, nx, T{});
    // each (ty, tx) accumulates independently, so the two outer loops are
    // collapsed into one parallel iteration space
    #pragma omp parallel for collapse(2)
    for (auto ty = 0; ty < ny; ty++)
        for (auto tx = 0; tx < nx; tx++) {
            auto tmp = T{};
            for (auto j = 0; j < ny - ty; j++)
                for (auto i = 0; i < nx - tx; i++)
                    if (j + ty - 1 < 0 || i + tx - 1 < 0) continue;
                    else tmp = tmp + I.at<T>(j, i) * I.at<T>(j + ty - 1, i + tx - 1); // I(-1, -1) is invalid, which means AACV row_0 and col_0 are invalid
            AACV.at<T>(ty, tx) = tmp;
        }
    // NOTE(review): nx * ny is an int product — could overflow for very
    // large images; confirm expected input sizes.
    AACV /= (nx * ny);
    // translate to remove invalid row_0 and col_0
    //cv::Mat trans_mat = (cv::Mat_<T>(2,3) << 1, 0, -1, 0, 1, -1);
    //cv::warpAffine(AACV, AACV, trans_mat, AACV.size());
    // copy to remove invalid row_0 and col_0
    AACV(cv::Rect(1, 1, I.cols - 1, I.rows - 1)).copyTo(AACV(cv::Rect(0, 0, I.cols - 1, I.rows - 1)));
    AACV.row(I.rows - 1) = 0;
    AACV.col(I.cols - 1) = 0;
    return AACV;
}
}
#endif //M_MATH_M_AREA_AUTO_COVARIANCE_H |
bdf2_turbulent_scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Jordi Cotela
//
#if !defined(KRATOS_BDF2_TURBULENT_SCHEME_H_INCLUDED )
#define KRATOS_BDF2_TURBULENT_SCHEME_H_INCLUDED
// System includes
#include <string>
#include <iostream>
// External includes
// Project includes
#include "solving_strategies/schemes/scheme.h"
#include "includes/define.h"
// #include "includes/serializer.h"
#include "includes/dof.h"
#include "processes/process.h"
#include "containers/pointer_vector_set.h"
#include "utilities/coordinate_transformation_utilities.h"
// Application includes
#include "fluid_dynamics_application_variables.h"
namespace Kratos
{
///@addtogroup FluidDynamicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// A scheme for BDF2 time integration.
/**
*/
template<class TSparseSpace,class TDenseSpace>
class BDF2TurbulentScheme : public Scheme<TSparseSpace, TDenseSpace>
{
public:
///@name Type Definitions
///@{
/// Pointer definition of BDF2TurbulentScheme
KRATOS_CLASS_POINTER_DEFINITION(BDF2TurbulentScheme);
typedef Scheme<TSparseSpace,TDenseSpace> BaseType;
typedef typename TSparseSpace::DataType TDataType;
typedef typename TSparseSpace::MatrixType TSystemMatrixType;
typedef typename TSparseSpace::VectorType TSystemVectorType;
typedef typename TDenseSpace::MatrixType LocalSystemMatrixType;
typedef typename TDenseSpace::VectorType LocalSystemVectorType;
typedef Dof<TDataType> TDofType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef CoordinateTransformationUtils<LocalSystemMatrixType, LocalSystemVectorType, double> RotationToolType;
typedef typename RotationToolType::UniquePointer RotationToolPointerType;
///@}
///@name Life Cycle
///@{
/// Default constructor (no turbulence model, no periodic conditions).
BDF2TurbulentScheme()
    : Scheme<TSparseSpace, TDenseSpace>()
    , mrPeriodicIdVar(Kratos::Variable<int>::StaticObject())
{}

/// Constructor to use the formulation combined with a turbulence model.
/**
 * The turbulence model is assumed to be implemented as a Kratos::Process.
 * The model's Execute() method will be called at the start of each
 * non-linear iteration.
 * @param pTurbulenceModel pointer to the turbulence model
 */
BDF2TurbulentScheme(Process::Pointer pTurbulenceModel)
    : Scheme<TSparseSpace, TDenseSpace>()
    , mpTurbulenceModel(pTurbulenceModel)
    , mrPeriodicIdVar(Kratos::Variable<int>::StaticObject())
{}

/// Constructor for periodic boundary conditions.
/**
 * @param rPeriodicVar the variable used to store periodic pair indices.
 */
BDF2TurbulentScheme(const Kratos::Variable<int>& rPeriodicVar)
    : Scheme<TSparseSpace, TDenseSpace>()
    , mrPeriodicIdVar(rPeriodicVar)
{}

/// Destructor.
~BDF2TurbulentScheme() override
{}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/// Check input data for errors.
/**
* @param rModelPart The fluid's ModelPart
* @return 0 if no errors were found
*/
int Check(ModelPart& rModelPart) override
{
    KRATOS_TRY

    // Base scheme check
    int error_code = BaseType::Check(rModelPart);
    if (error_code != 0) {
        return error_code;
    }

    // Check buffer size: BDF2 needs the current step plus two history steps
    KRATOS_ERROR_IF(rModelPart.GetBufferSize() < 3)
        << "Insufficient buffer size for BDF2, should be at least 3, got " << rModelPart.GetBufferSize() << std::endl;

    return 0;
    KRATOS_CATCH("");
}
void Initialize(ModelPart& rModelPart) override
{
    // Build the slip-condition rotation tool for the current domain size
    // (block size = domain_size velocity dofs + 1 pressure dof per node)
    const auto& r_process_info = rModelPart.GetProcessInfo();
    const unsigned int domain_size = r_process_info[DOMAIN_SIZE];
    auto p_rotation_tool = Kratos::make_unique<RotationToolType>(domain_size, domain_size + 1, SLIP);
    mpRotationTool.swap(p_rotation_tool);

    // Base initialize call
    BaseType::Initialize(rModelPart);
}
/// Set the time iteration coefficients
void InitializeSolutionStep(
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b) override
{
    // Compute the BDF2 coefficients for the current (possibly variable) step
    this->SetTimeCoefficients(rModelPart.GetProcessInfo());

    // Base function initializes elements and conditions
    BaseType::InitializeSolutionStep(rModelPart,A,Dx,b);

    // Recalculate mesh velocity (to account for variable time step)
    const double tol = 1.0e-12;
    const double Dt = rModelPart.GetProcessInfo()[DELTA_TIME];
    const double OldDt = rModelPart.GetProcessInfo().GetPreviousSolutionStepInfo(1)[DELTA_TIME];
    if(std::abs(Dt - OldDt) > tol) {
        const int n_nodes = rModelPart.NumberOfNodes();
        const Vector& BDFcoefs = rModelPart.GetProcessInfo()[BDF_COEFFICIENTS];

        #pragma omp parallel for
        for(int i_node = 0; i_node < n_nodes; ++i_node) {
            auto it_node = rModelPart.NodesBegin() + i_node;
            auto& rMeshVel = it_node->FastGetSolutionStepValue(MESH_VELOCITY);
            const auto& rDisp0 = it_node->FastGetSolutionStepValue(DISPLACEMENT);
            const auto& rDisp1 = it_node->FastGetSolutionStepValue(DISPLACEMENT,1);
            const auto& rDisp2 = it_node->FastGetSolutionStepValue(DISPLACEMENT,2);
            // BDF2 time derivative of the nodal displacement history
            rMeshVel = BDFcoefs[0] * rDisp0 + BDFcoefs[1] * rDisp1 + BDFcoefs[2] * rDisp2;
        }
    }
}
void InitializeNonLinIteration(
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b) override
{
    KRATOS_TRY

    // Let the attached turbulence model (if any) update itself before
    // the fluid iteration
    if (mpTurbulenceModel) {
        mpTurbulenceModel->Execute();
    }

    KRATOS_CATCH("")
}
// Updates the orthogonal-subscale projections after each non-linear
// iteration when the OSS stabilization switch is active.
void FinalizeNonLinIteration(
    ModelPart &rModelPart,
    TSystemMatrixType &A,
    TSystemVectorType &Dx,
    TSystemVectorType &b) override
{
    const ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();

    //if orthogonal subscales are computed
    if (CurrentProcessInfo[OSS_SWITCH] == 1.0)
    {
        this->LumpedProjection(rModelPart);
        //this->FullProjection(rModelPart);
    }
}
/// Start the iteration by providing a first approximation to the solution.
void Predict(
    ModelPart& rModelPart,
    DofsArrayType& rDofSet,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b) override
{
    KRATOS_TRY

    const int n_nodes = rModelPart.NumberOfNodes();
    const Vector& BDFcoefs = rModelPart.GetProcessInfo()[BDF_COEFFICIENTS];

    #pragma omp parallel for
    for(int i_node = 0; i_node < n_nodes; ++i_node) {
        auto it_node = rModelPart.NodesBegin() + i_node;
        auto& rVel0 = it_node->FastGetSolutionStepValue(VELOCITY);
        const auto& rVel1 = it_node->FastGetSolutionStepValue(VELOCITY,1);
        const auto& rVel2 = it_node->FastGetSolutionStepValue(VELOCITY,2);
        auto& rAcceleration = it_node->FastGetSolutionStepValue(ACCELERATION);

        // Predict velocities: linear extrapolation of the two previous
        // steps, skipping fixed (Dirichlet) components
        if(!it_node->IsFixed(VELOCITY_X))
            rVel0[0] = 2.00 * rVel1[0] - rVel2[0];
        if(!it_node->IsFixed(VELOCITY_Y))
            rVel0[1] = 2.00 * rVel1[1] - rVel2[1];
        if(!it_node->IsFixed(VELOCITY_Z))
            rVel0[2] = 2.00 * rVel1[2] - rVel2[2];

        // Predict acceleration with the BDF2 formula on the velocity history
        rAcceleration = BDFcoefs[0] * rVel0 + BDFcoefs[1] * rVel1 + BDFcoefs[2] * rVel2;
    }

    KRATOS_CATCH("")
}
/// Store the iteration results as solution step variables and update acceleration after a Newton-Raphson iteration.
/**
 * @param rModelPart fluid ModelPart
 * @param rDofSet DofSet containing the Newton-Raphson system degrees of freedom.
 * @param A Newton-Raphson system matrix (unused)
 * @param Dx Newton-Raphson iteration solution
 * @param b Newton-Raphson right hand side (unused)
 */
void Update(
    ModelPart& rModelPart,
    DofsArrayType& rDofSet,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b) override
{
    KRATOS_TRY
    // Apply the correction in the rotated (slip-aligned) frame
    mpRotationTool->RotateVelocities(rModelPart);
    mpDofUpdater->UpdateDofs(rDofSet, Dx);
    mpRotationTool->RecoverVelocities(rModelPart);
    // Refresh ACCELERATION from the corrected velocities using the BDF2 coefficients
    const Vector& r_bdf_coefs = rModelPart.GetProcessInfo()[BDF_COEFFICIENTS];
    this->UpdateAcceleration(rModelPart, r_bdf_coefs);
    KRATOS_CATCH("")
}
/// Assemble the dynamic local system (LHS and RHS) for an element.
void CalculateSystemContributions(
    Element& rCurrentElement,
    LocalSystemMatrixType& LHS_Contribution,
    LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& rEquationId,
    const ProcessInfo& rCurrentProcessInfo) override
{
    KRATOS_TRY
    LocalSystemMatrixType mass_matrix;
    LocalSystemMatrixType damp_matrix;
    // Equation ids used for global assembly
    rCurrentElement.EquationIdVector(rEquationId, rCurrentProcessInfo);
    // Elemental contributions: base system, inertia and velocity terms
    rCurrentElement.CalculateLocalSystem(LHS_Contribution, RHS_Contribution, rCurrentProcessInfo);
    rCurrentElement.CalculateMassMatrix(mass_matrix, rCurrentProcessInfo);
    rCurrentElement.CalculateLocalVelocityContribution(damp_matrix, RHS_Contribution, rCurrentProcessInfo);
    // Combine the dynamic contributions using the BDF2 coefficients
    this->CombineLHSContributions(LHS_Contribution, mass_matrix, damp_matrix, rCurrentProcessInfo);
    this->AddDynamicRHSContribution<Kratos::Element>(rCurrentElement, RHS_Contribution, mass_matrix, rCurrentProcessInfo);
    // Rotate and constrain the local system for slip boundary conditions
    mpRotationTool->Rotate(LHS_Contribution, RHS_Contribution, rCurrentElement.GetGeometry());
    mpRotationTool->ApplySlipCondition(LHS_Contribution, RHS_Contribution, rCurrentElement.GetGeometry());
    KRATOS_CATCH("")
}
/// Assemble the dynamic RHS contribution for an element.
void CalculateRHSContribution(
    Element& rCurrentElement,
    LocalSystemVectorType &RHS_Contribution,
    Element::EquationIdVectorType &rEquationId,
    const ProcessInfo &rCurrentProcessInfo) override
{
    KRATOS_TRY
    LocalSystemMatrixType mass_matrix;
    LocalSystemMatrixType damp_matrix;
    // Equation ids used for global assembly
    rCurrentElement.EquationIdVector(rEquationId, rCurrentProcessInfo);
    // Elemental contributions: residual, inertia and velocity terms
    rCurrentElement.CalculateRightHandSide(RHS_Contribution, rCurrentProcessInfo);
    rCurrentElement.CalculateMassMatrix(mass_matrix, rCurrentProcessInfo);
    rCurrentElement.CalculateLocalVelocityContribution(damp_matrix, RHS_Contribution, rCurrentProcessInfo);
    // Subtract the inertial term using the BDF2 coefficients
    this->AddDynamicRHSContribution<Kratos::Element>(rCurrentElement, RHS_Contribution, mass_matrix, rCurrentProcessInfo);
    // Rotate and constrain the RHS for slip boundary conditions
    mpRotationTool->Rotate(RHS_Contribution, rCurrentElement.GetGeometry());
    mpRotationTool->ApplySlipCondition(RHS_Contribution, rCurrentElement.GetGeometry());
    KRATOS_CATCH("")
}
/// Assemble the dynamic local system (LHS and RHS) for a condition.
void CalculateSystemContributions(
    Condition& rCurrentCondition,
    LocalSystemMatrixType& LHS_Contribution,
    LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& rEquationId,
    const ProcessInfo& rCurrentProcessInfo) override
{
    KRATOS_TRY
    LocalSystemMatrixType mass_matrix;
    LocalSystemMatrixType damp_matrix;
    // Equation ids used for global assembly
    rCurrentCondition.EquationIdVector(rEquationId, rCurrentProcessInfo);
    // Condition contributions: base system, inertia and velocity terms
    rCurrentCondition.CalculateLocalSystem(LHS_Contribution, RHS_Contribution, rCurrentProcessInfo);
    rCurrentCondition.CalculateMassMatrix(mass_matrix, rCurrentProcessInfo);
    rCurrentCondition.CalculateLocalVelocityContribution(damp_matrix, RHS_Contribution, rCurrentProcessInfo);
    // Combine the dynamic contributions using the BDF2 coefficients
    this->CombineLHSContributions(LHS_Contribution, mass_matrix, damp_matrix, rCurrentProcessInfo);
    this->AddDynamicRHSContribution<Kratos::Condition>(rCurrentCondition, RHS_Contribution, mass_matrix, rCurrentProcessInfo);
    // Rotate and constrain the local system for slip boundary conditions
    mpRotationTool->Rotate(LHS_Contribution, RHS_Contribution, rCurrentCondition.GetGeometry());
    mpRotationTool->ApplySlipCondition(LHS_Contribution, RHS_Contribution, rCurrentCondition.GetGeometry());
    KRATOS_CATCH("")
}
/// Assemble the dynamic RHS contribution for a condition.
void CalculateRHSContribution(
    Condition &rCurrentCondition,
    LocalSystemVectorType &RHS_Contribution,
    Element::EquationIdVectorType &rEquationId,
    const ProcessInfo &rCurrentProcessInfo) override
{
    KRATOS_TRY
    LocalSystemMatrixType mass_matrix;
    LocalSystemMatrixType damp_matrix;
    // Equation ids used for global assembly
    rCurrentCondition.EquationIdVector(rEquationId, rCurrentProcessInfo);
    // Condition contributions: residual, inertia and velocity terms
    rCurrentCondition.CalculateRightHandSide(RHS_Contribution, rCurrentProcessInfo);
    rCurrentCondition.CalculateMassMatrix(mass_matrix, rCurrentProcessInfo);
    rCurrentCondition.CalculateLocalVelocityContribution(damp_matrix, RHS_Contribution, rCurrentProcessInfo);
    // Subtract the inertial term using the BDF2 coefficients
    this->AddDynamicRHSContribution<Kratos::Condition>(rCurrentCondition, RHS_Contribution, mass_matrix, rCurrentProcessInfo);
    // Rotate and constrain the RHS for slip boundary conditions
    mpRotationTool->Rotate(RHS_Contribution, rCurrentCondition.GetGeometry());
    mpRotationTool->ApplySlipCondition(RHS_Contribution, rCurrentCondition.GetGeometry());
    KRATOS_CATCH("")
}
/// Free memory allocated by this object.
void Clear() override
{
this->mpDofUpdater->Clear();
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
    // The scheme has no per-instance state worth reporting; return the type name.
    return std::string("BDF2TurbulentScheme");
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
    // No internal data to print.
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/// Calculate the coefficients for time iteration.
/**
 * Computes the variable-step BDF2 coefficients from the current and previous
 * time step sizes and stores them in the BDF_COEFFICIENTS variable.
 * @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME and BDF_COEFFICIENTS variables.
 */
void SetTimeCoefficients(ProcessInfo& rCurrentProcessInfo)
{
    KRATOS_TRY;
    //calculate the BDF coefficients
    double Dt = rCurrentProcessInfo[DELTA_TIME];
    double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME];
    // NOTE(review): if Dt or OldDt is zero (e.g. on the very first step) Rho
    // and TimeCoeff divide by zero -- presumably callers guarantee both are
    // set before this scheme runs; verify.
    double Rho = OldDt / Dt;
    double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho);
    Vector& BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
    // resize(3, false): do not preserve old values, only the size matters
    BDFcoeffs.resize(3, false);
    BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho); //coefficient for step n+1 (3/2Dt if Dt is constant)
    BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant)
    BDFcoeffs[2] = TimeCoeff; //coefficient for step n-1 (1/2Dt if Dt is constant)
    KRATOS_CATCH("");
}
/// Update Dof values after a Newton-Raphson iteration.
/**
 * @param rDofSet Container for the Degrees of freedom in the system
 * @param Dx Solution of the linear system
 */
virtual void UpdateDofs(
    DofsArrayType& rDofSet,
    TSystemVectorType& Dx)
{
    KRATOS_TRY
    const int num_dofs = rDofSet.size();
    #pragma omp parallel for
    for (int i = 0; i < num_dofs; ++i) {
        auto it_dof = rDofSet.begin() + i;
        // Fixed dofs keep their prescribed value; free dofs receive the correction
        if (it_dof->IsFree()) {
            it_dof->GetSolutionStepValue() += TSparseSpace::GetValue(Dx, it_dof->EquationId());
        }
    }
    KRATOS_CATCH("")
}
/// Recompute nodal ACCELERATION from the velocity history after an update.
/**
 * @param rModelPart fluid ModelPart
 * @param rBDFcoefs Time stepping coefficients for this iteration.
 */
void UpdateAcceleration(
    ModelPart& rModelPart,
    const Vector& rBDFcoefs)
{
    KRATOS_TRY
    const double c0 = rBDFcoefs[0];
    const double c1 = rBDFcoefs[1];
    const double c2 = rBDFcoefs[2];
    const int num_nodes = rModelPart.NumberOfNodes();
    #pragma omp parallel for
    for (int i = 0; i < num_nodes; ++i) {
        auto it_node = rModelPart.NodesBegin() + i;
        const auto& r_vel = it_node->FastGetSolutionStepValue(VELOCITY);
        const auto& r_vel_old = it_node->FastGetSolutionStepValue(VELOCITY,1);
        const auto& r_vel_older = it_node->FastGetSolutionStepValue(VELOCITY,2);
        auto& r_acc = it_node->FastGetSolutionStepValue(ACCELERATION);
        // BDF2 first derivative of the velocity
        r_acc = c0 * r_vel + c1 * r_vel_old + c2 * r_vel_older;
    }
    KRATOS_CATCH("")
}
/// Assemble LHS = Damp + c0 * Mass, skipping contributions that were not computed.
void CombineLHSContributions(
    LocalSystemMatrixType& rLHS,
    LocalSystemMatrixType& rMass,
    LocalSystemMatrixType& rDamp,
    const ProcessInfo& rCurrentProcessInfo)
{
    // First BDF2 coefficient scales the inertial term
    const double mass_coef = rCurrentProcessInfo.GetValue(BDF_COEFFICIENTS)[0];
    if (rMass.size1() != 0) {
        noalias(rLHS) += mass_coef * rMass;
    }
    if (rDamp.size1() != 0) {
        noalias(rLHS) += rDamp;
    }
}
/// Subtract the dynamic (inertial) term Mass * du/dt from the local RHS.
/**
 * Builds the BDF2 approximation of the first time derivative from the last
 * three first-derivative vectors of the entity and subtracts Mass * Acc
 * from rRHS. Does nothing when the mass matrix is empty.
 * @tparam TObject Kratos::Element or Kratos::Condition
 * @param rObject entity whose derivative history is read
 * @param rRHS local right hand side, corrected in place
 * @param rMass local mass matrix (skipped entirely when its size is 0)
 * @param rCurrentProcessInfo must hold BDF_COEFFICIENTS
 */
template<class TObject>
void AddDynamicRHSContribution(
    TObject& rObject,
    LocalSystemVectorType& rRHS,
    LocalSystemMatrixType& rMass,
    const ProcessInfo& rCurrentProcessInfo)
{
    if (rMass.size1() != 0)
    {
        const Vector& rCoefs = rCurrentProcessInfo.GetValue(BDF_COEFFICIENTS);
        // Read-only alias of the entity for the derivative queries
        const auto& r_const_obj_ref = rObject;
        LocalSystemVectorType Acc;
        r_const_obj_ref.GetFirstDerivativesVector(Acc);
        Acc *= rCoefs[0];
        // Add the contributions of the two previous time steps (n = 1, 2)
        for(unsigned int n = 1; n < 3; ++n)
        {
            LocalSystemVectorType rVel;
            r_const_obj_ref.GetFirstDerivativesVector(rVel,n);
            noalias(Acc) += rCoefs[n] * rVel;
        }
        noalias(rRHS) -= prod(rMass,Acc);
    }
}
/// Compute the OSS projections by iteratively solving the consistent mass matrix system.
/**
 * Solves M x = b for the momentum (ADVPROJ) and mass (DIVPROJ) projections
 * with lumped-mass preconditioned iterations, until both the relative and
 * absolute error measures meet their tolerances or MaxIter is reached.
 * @param rModelPart fluid ModelPart
 */
void FullProjection(ModelPart& rModelPart)
{
    const ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
    // Initialize containers
    const int n_nodes = rModelPart.NumberOfNodes();
    const int n_elems = rModelPart.NumberOfElements();
    const array_1d<double,3> zero_vect = ZeroVector(3);
    #pragma omp parallel for firstprivate(zero_vect)
    for (int i_node = 0; i_node < n_nodes; ++i_node) {
        auto ind = rModelPart.NodesBegin() + i_node;
        noalias(ind->FastGetSolutionStepValue(ADVPROJ)) = zero_vect; // "x"
        ind->FastGetSolutionStepValue(DIVPROJ) = 0.0; // "x"
        ind->FastGetSolutionStepValue(NODAL_AREA) = 0.0; // "Ml"
    }
    // Iteration parameters (tolerances scale with the number of nodes)
    const double RelTol = 1e-4 * rModelPart.NumberOfNodes();
    const double AbsTol = 1e-6 * rModelPart.NumberOfNodes();
    const unsigned int MaxIter = 100;
    // Iteration variables, initialized so the loop is entered at least once
    unsigned int iter = 0;
    double RelMomErr = 1000.0 * RelTol;
    double RelMassErr = 1000.0 * RelTol;
    double AbsMomErr = 1000.0 * AbsTol;
    double AbsMassErr = 1000.0 * AbsTol;
    while( ( (AbsMomErr > AbsTol && RelMomErr > RelTol) || (AbsMassErr > AbsTol && RelMassErr > RelTol) ) && iter < MaxIter)
    {
        // Reinitialize RHS
        #pragma omp parallel for firstprivate(zero_vect)
        for (int i_node = 0; i_node < n_nodes; ++i_node)
        {
            auto ind = rModelPart.NodesBegin() + i_node;
            noalias(ind->GetValue(ADVPROJ)) = zero_vect; // "b"
            ind->GetValue(DIVPROJ) = 0.0; // "b"
            ind->FastGetSolutionStepValue(NODAL_AREA) = 0.0; // Reset because Calculate will overwrite it
        }
        // Reinitialize errors
        RelMomErr = 0.0;
        RelMassErr = 0.0;
        AbsMomErr = 0.0;
        AbsMassErr = 0.0;
        // Compute new values
        #pragma omp parallel for
        for (int i_elem = 0; i_elem < n_elems; ++i_elem) {
            auto it_elem = rModelPart.ElementsBegin() + i_elem;
            array_1d<double, 3 > output; // thread-local scratch, value unused here
            it_elem->Calculate(SUBSCALE_VELOCITY, output, rCurrentProcessInfo);
        }
        rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA);
        rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ);
        rModelPart.GetCommunicator().AssembleCurrentData(ADVPROJ);
        rModelPart.GetCommunicator().AssembleNonHistoricalData(DIVPROJ);
        rModelPart.GetCommunicator().AssembleNonHistoricalData(ADVPROJ);
        // Update iteration variables.
        // BUGFIX: the four error accumulators are shared across threads, so they
        // must be combined with an OpenMP reduction; the per-node increments
        // (dMomProj, dMassProj) are now loop-local instead of shared.
        #pragma omp parallel for reduction(+:RelMomErr,RelMassErr,AbsMomErr,AbsMassErr)
        for (int i_node = 0; i_node < n_nodes; ++i_node) {
            auto ind = rModelPart.NodesBegin() + i_node;
            const double Area = ind->FastGetSolutionStepValue(NODAL_AREA); // Ml dx = b - Mc x
            const array_1d<double,3> dMomProj = ind->GetValue(ADVPROJ) / Area;
            const double dMassProj = ind->GetValue(DIVPROJ) / Area;
            RelMomErr += sqrt( dMomProj[0]*dMomProj[0] + dMomProj[1]*dMomProj[1] + dMomProj[2]*dMomProj[2]);
            RelMassErr += fabs(dMassProj);
            auto& rMomRHS = ind->FastGetSolutionStepValue(ADVPROJ);
            double& rMassRHS = ind->FastGetSolutionStepValue(DIVPROJ);
            rMomRHS += dMomProj;
            rMassRHS += dMassProj;
            AbsMomErr += sqrt( rMomRHS[0]*rMomRHS[0] + rMomRHS[1]*rMomRHS[1] + rMomRHS[2]*rMomRHS[2]);
            AbsMassErr += fabs(rMassRHS);
        }
        if(AbsMomErr > 1e-10)
            RelMomErr /= AbsMomErr;
        else // If residual is close to zero, force absolute convergence to avoid division by zero errors
            RelMomErr = 1000.0;
        if(AbsMassErr > 1e-10)
            RelMassErr /= AbsMassErr;
        else
            RelMassErr = 1000.0;
        iter++;
    }
    KRATOS_INFO("BDF2TurbulentScheme") << "Performed OSS Projection in " << iter << " iterations" << std::endl;
}
/// Compute the OSS projections using a lumped (diagonal) mass matrix.
void LumpedProjection(ModelPart& rModelPart)
{
    const int num_nodes = rModelPart.NumberOfNodes();
    const int num_elems = rModelPart.NumberOfElements();
    const ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
    // Reset the nodal containers that will receive elemental contributions
    const array_1d<double,3> zero_vect = ZeroVector(3);
    #pragma omp parallel for firstprivate(zero_vect)
    for (int i = 0; i < num_nodes; ++i) {
        auto it_node = rModelPart.NodesBegin() + i;
        noalias(it_node->FastGetSolutionStepValue(ADVPROJ)) = zero_vect;
        it_node->FastGetSolutionStepValue(DIVPROJ) = 0.0;
        it_node->FastGetSolutionStepValue(NODAL_AREA) = 0.0;
    }
    // Each element assembles its projection contributions to its nodes
    array_1d<double, 3 > Out;
    #pragma omp parallel for private(Out)
    for (int i = 0; i < num_elems; ++i) {
        auto it_elem = rModelPart.ElementsBegin() + i;
        it_elem->Calculate(ADVPROJ, Out, r_process_info);
    }
    // Synchronize assembled data across partitions
    rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA);
    rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ);
    rModelPart.GetCommunicator().AssembleCurrentData(ADVPROJ);
    // Correction for periodic conditions
    if (mrPeriodicIdVar.Key() != 0) {
        this->PeriodicConditionProjectionCorrection(rModelPart);
    }
    // Divide by the lumped mass; nodes that received no area get 1.0 to avoid
    // dividing by (near-)zero
    const double zero_tol = 1.0e-12;
    #pragma omp parallel for firstprivate(zero_tol)
    for (int i = 0; i < num_nodes; ++i) {
        auto it_node = rModelPart.NodesBegin() + i;
        if (it_node->FastGetSolutionStepValue(NODAL_AREA) < zero_tol) {
            it_node->FastGetSolutionStepValue(NODAL_AREA) = 1.0;
        }
        const double area = it_node->FastGetSolutionStepValue(NODAL_AREA);
        it_node->FastGetSolutionStepValue(ADVPROJ) /= area;
        it_node->FastGetSolutionStepValue(DIVPROJ) /= area;
    }
    KRATOS_INFO("BDF2TurbulentScheme") << "Computing OSS projections" << std::endl;
}
/** On periodic boundaries, the nodal area and the values to project need to take into account contributions from elements on
 * both sides of the boundary. This is done using the conditions and the non-historical nodal data containers as follows:\n
 * 1- The partition that owns the PeriodicCondition adds the values on both nodes to their non-historical containers.\n
 * 2- The non-historical containers are added across processes, communicating the right value from the condition owner to all partitions.\n
 * 3- The value on all periodic nodes is replaced by the one received in step 2.
 */
void PeriodicConditionProjectionCorrection(ModelPart& rModelPart)
{
    const int num_nodes = rModelPart.NumberOfNodes();
    const int num_conditions = rModelPart.NumberOfConditions();
    // Step 0: clear the non-historical containers used as scratch space
    #pragma omp parallel for
    for (int i = 0; i < num_nodes; i++) {
        auto it_node = rModelPart.NodesBegin() + i;
        it_node->SetValue(NODAL_AREA, 0.0);
        it_node->SetValue(ADVPROJ, ZeroVector(3));
        it_node->SetValue(DIVPROJ, 0.0);
    }
    // Step 1: combine the values of the two nodes of each periodic condition
    #pragma omp parallel for
    for (int i = 0; i < num_conditions; i++) {
        auto it_cond = rModelPart.ConditionsBegin() + i;
        if (it_cond->Is(PERIODIC)) {
            this->AssemblePeriodicContributionToProjections(it_cond->GetGeometry());
        }
    }
    // Step 2: communicate the combined values across partitions
    rModelPart.GetCommunicator().AssembleNonHistoricalData(NODAL_AREA);
    rModelPart.GetCommunicator().AssembleNonHistoricalData(ADVPROJ);
    rModelPart.GetCommunicator().AssembleNonHistoricalData(DIVPROJ);
    // Step 3: overwrite the historical values on periodic nodes
    #pragma omp parallel for
    for (int i = 0; i < num_nodes; i++) {
        auto it_node = rModelPart.NodesBegin() + i;
        this->CorrectContributionsOnPeriodicNode(*it_node);
    }
}
/// Sum NODAL_AREA / ADVPROJ / DIVPROJ over the nodes of a periodic condition
/// and store the combined values in each node's non-historical container.
void AssemblePeriodicContributionToProjections(Geometry< Node<3> >& rGeometry)
{
    const unsigned int num_points = rGeometry.PointsNumber();
    double total_area = 0.0;
    array_1d<double,3> total_momentum = ZeroVector(3);
    double total_mass = 0.0;
    // First pass: accumulate the historical values of all condition nodes
    for (unsigned int i = 0; i < num_points; i++)
    {
        auto& r_node = rGeometry[i];
        total_area += r_node.FastGetSolutionStepValue(NODAL_AREA);
        noalias(total_momentum) += r_node.FastGetSolutionStepValue(ADVPROJ);
        total_mass += r_node.FastGetSolutionStepValue(DIVPROJ);
    }
    /* Second pass: write the combined values back.
     * Note that this loop is expected to be threadsafe in normal conditions,
     * since each node should belong to a single periodic link. However, the
     * locks are set for openmp in case more complicated setups are tried in
     * the future (like having different periodic conditions for different
     * coordinate directions).
     */
    for (unsigned int i = 0; i < num_points; i++)
    {
        auto& r_node = rGeometry[i];
        r_node.SetLock();
        r_node.GetValue(NODAL_AREA) = total_area;
        noalias(r_node.GetValue(ADVPROJ)) = total_momentum;
        r_node.GetValue(DIVPROJ) = total_mass;
        r_node.UnSetLock();
    }
}
/// Replace the historical projection values on periodic nodes by the combined ones.
void CorrectContributionsOnPeriodicNode(Node<3>& rNode)
{
    //TODO: This needs to be done in another manner as soon as we start using non-historical NODAL_AREA
    // Only periodic nodes will have a non-historical NODAL_AREA set.
    const double combined_area = rNode.GetValue(NODAL_AREA);
    if (combined_area != 0.0)
    {
        rNode.FastGetSolutionStepValue(NODAL_AREA) = combined_area;
        noalias(rNode.FastGetSolutionStepValue(ADVPROJ)) = rNode.GetValue(ADVPROJ);
        rNode.FastGetSolutionStepValue(DIVPROJ) = rNode.GetValue(DIVPROJ);
    }
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
/// Pointer to a turbulence model
Process::Pointer mpTurbulenceModel = nullptr;
RotationToolPointerType mpRotationTool = nullptr;
typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater();
const Kratos::Variable<int>& mrPeriodicIdVar;
// ///@}
// ///@name Serialization
// ///@{
//
// friend class Serializer;
//
// virtual void save(Serializer& rSerializer) const
// {
// KRATOS_SERIALIZE_SAVE_BASE_CLASS(rSerializer, BaseType );
// rSerializer.save("mpTurbulenceModel",mpTurbulenceModel);
// }
//
// virtual void load(Serializer& rSerializer)
// {
// KRATOS_SERIALIZE_LOAD_BASE_CLASS(rSerializer, BaseType );
// rSerializer.load("mpTurbulenceModel",mpTurbulenceModel);
// }
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator (deleted: the scheme is non-copyable).
// BUGFIX: the previous user-provided operator= had an empty body and never
// returned a value, which is undefined behavior if it is ever invoked.
// Deleting both special members preserves the "non-copyable" intent while
// turning any accidental use into a compile-time error.
BDF2TurbulentScheme & operator=(BDF2TurbulentScheme const& rOther) = delete;
/// Copy constructor (deleted: the scheme is non-copyable).
BDF2TurbulentScheme(BDF2TurbulentScheme const& rOther) = delete;
///@}
}; // Class BDF2TurbulentScheme
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
/** Nothing is deserialized for this scheme; the stream is returned unchanged. */
template<class TSparseSpace,class TDenseSpace>
inline std::istream& operator >>(std::istream& rIStream,BDF2TurbulentScheme<TSparseSpace,TDenseSpace>& rThis)
{
    return rIStream;
}
/// output stream function
/** Prints the scheme's Info() line followed by its data via PrintData(). */
template<class TSparseSpace,class TDenseSpace>
inline std::ostream& operator <<(std::ostream& rOStream,const BDF2TurbulentScheme<TSparseSpace,TDenseSpace>& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);
    return rOStream;
}
///@}
///@} addtogroup block
} // namespace Kratos.
#endif // KRATOS_BDF2_TURBULENT_SCHEME_H_INCLUDED defined
|
level.c | // RUN: %compile-run-and-check
#include <omp.h>
#include <stdio.h>
const int MaxThreads = 1024;
const int NumThreads = 64;
// Entry point: validates omp_get_level / omp_get_active_level /
// omp_get_ancestor_thread_num / omp_get_team_size on a target device, both
// inside an active parallel region and a nested serialized one. Mismatches
// are reported as "invalid" lines, which the CHECK-NOT directive rejects.
int main(int argc, char *argv[]) {
  int level = -1, activeLevel = -1;
  // The expected value is -1, initialize to different value.
  int ancestorTNumNeg = 1, teamSizeNeg = 1;
  int ancestorTNum0 = -1, teamSize0 = -1;
  // The expected value is -1, initialize to different value.
  int ancestorTNum1 = 1, teamSize1 = 1;
  // Per-thread accumulators, indexed by thread id; slots beyond the actual
  // team size must stay 0.
  int check1[MaxThreads];
  int check2[MaxThreads];
  int check3[MaxThreads];
  int check4[MaxThreads];
  for (int i = 0; i < MaxThreads; i++) {
    check1[i] = check2[i] = check3[i] = check4[i] = 0;
  }
#pragma omp target map(level, activeLevel, ancestorTNumNeg, teamSizeNeg) \
                   map(ancestorTNum0, teamSize0, ancestorTNum1, teamSize1) \
                   map(check1[:], check2[:], check3[:], check4[:])
  {
    level = omp_get_level();
    activeLevel = omp_get_active_level();
    // Expected to return -1.
    ancestorTNumNeg = omp_get_ancestor_thread_num(-1);
    teamSizeNeg = omp_get_team_size(-1);
    // Expected to return 0 and 1.
    ancestorTNum0 = omp_get_ancestor_thread_num(0);
    teamSize0 = omp_get_team_size(0);
    // Expected to return -1 because the requested level is larger than
    // the nest level.
    ancestorTNum1 = omp_get_ancestor_thread_num(1);
    teamSize1 = omp_get_team_size(1);
    // Expecting active parallel region.
#pragma omp parallel num_threads(NumThreads)
    {
      int id = omp_get_thread_num();
      // Multiply return value of omp_get_level by 5 to avoid that this test
      // passes if both API calls return wrong values.
      check1[id] += omp_get_level() * 5 + omp_get_active_level();
      // Expected to return 0 and 1.
      check2[id] += omp_get_ancestor_thread_num(0) + 5 * omp_get_team_size(0);
      // Expected to return the current thread num.
      check2[id] += (omp_get_ancestor_thread_num(1) - id);
      // Expected to return the current number of threads.
      check2[id] += 3 * omp_get_team_size(1);
      // Expected to return -1, see above.
      check2[id] += omp_get_ancestor_thread_num(2) + omp_get_team_size(2);
      // Expecting serialized parallel region.
#pragma omp parallel
      {
#pragma omp atomic
        check3[id] += omp_get_level() * 5 + omp_get_active_level();
        // Expected to return 0 and 1.
        int check4Inc = omp_get_ancestor_thread_num(0) + 5 * omp_get_team_size(0);
        // Expected to return the parent thread num.
        check4Inc += (omp_get_ancestor_thread_num(1) - id);
        // Expected to return the number of threads in the active parallel region.
        check4Inc += 3 * omp_get_team_size(1);
        // Expected to return 0 and 1.
        check4Inc += omp_get_ancestor_thread_num(2) + 3 * omp_get_team_size(2);
        // Expected to return -1, see above.
        check4Inc += omp_get_ancestor_thread_num(3) + omp_get_team_size(3);
#pragma omp atomic
        check4[id] += check4Inc;
      }
    }
  }
  // CHECK: target: level = 0, activeLevel = 0
  printf("target: level = %d, activeLevel = %d\n", level, activeLevel);
  // CHECK: level = -1: ancestorTNum = -1, teamSize = -1
  printf("level = -1: ancestorTNum = %d, teamSize = %d\n", ancestorTNumNeg, teamSizeNeg);
  // CHECK: level = 0: ancestorTNum = 0, teamSize = 1
  printf("level = 0: ancestorTNum = %d, teamSize = %d\n", ancestorTNum0, teamSize0);
  // CHECK: level = 1: ancestorTNum = -1, teamSize = -1
  printf("level = 1: ancestorTNum = %d, teamSize = %d\n", ancestorTNum1, teamSize1);
  // CHECK-NOT: invalid
  for (int i = 0; i < MaxThreads; i++) {
    // Check active parallel region:
    // omp_get_level() = 1, omp_get_active_level() = 1
    const int Expected1 = 6;
    if (i < NumThreads) {
      if (check1[i] != Expected1) {
        printf("invalid: check1[%d] should be %d, is %d\n", i, Expected1, check1[i]);
      }
    } else if (check1[i] != 0) {
      printf("invalid: check1[%d] should be 0, is %d\n", i, check1[i]);
    }
    // 5 * 1 + 3 * 64 - 1 - 1 (see above)
    const int Expected2 = 195;
    if (i < NumThreads) {
      if (check2[i] != Expected2) {
        printf("invalid: check2[%d] should be %d, is %d\n", i, Expected2, check2[i]);
      }
    } else if (check2[i] != 0) {
      printf("invalid: check2[%d] should be 0, is %d\n", i, check2[i]);
    }
    // Check serialized parallel region:
    // omp_get_level() = 2, omp_get_active_level() = 1
    const int Expected3 = 11;
    if (i < NumThreads) {
      if (check3[i] != Expected3) {
        printf("invalid: check3[%d] should be %d, is %d\n", i, Expected3, check3[i]);
      }
    } else if (check3[i] != 0) {
      printf("invalid: check3[%d] should be 0, is %d\n", i, check3[i]);
    }
    // 5 * 1 + 3 * 64 + 3 * 1 - 1 - 1 (see above)
    const int Expected4 = 198;
    if (i < NumThreads) {
      if (check4[i] != Expected4) {
        printf("invalid: check4[%d] should be %d, is %d\n", i, Expected4, check4[i]);
      }
    } else if (check4[i] != 0) {
      printf("invalid: check4[%d] should be 0, is %d\n", i, check4[i]);
    }
  }
  // Check for parallel level in non-SPMD kernels.
  level = 0;
#pragma omp target teams distribute num_teams(1) thread_limit(32) reduction(+:level)
  for (int i=0; i<5032; i+=32) {
    int ub = (i+32 > 5032) ? 5032 : i+32;
#pragma omp parallel for schedule(dynamic)
    for (int j=i ; j < ub; j++) ;
    // Sampled outside the inner parallel region, so 0 is expected each time.
    level += omp_get_level();
  }
  // CHECK: Integral level = 0.
  printf("Integral level = %d.\n", level);
  return 0;
}
|
single.c | /* PMSIS includes */
#include "pmsis.h"
#include "omp.h"
#define NUM_THREADS (6)
static int a[NUM_THREADS] = {0};
static uint32_t errors = 0;
/* Cluster main entry, executed by core 0: verifies that exactly one thread
 * of the parallel team executes the omp single region. */
void cluster_delegate(void *arg)
{
    printf("Cluster delegate\n");
#pragma omp parallel num_threads(NUM_THREADS)
    {
#pragma omp single
        {
            /* Only the thread that enters the single region marks its slot. */
            a[omp_get_thread_num()] = 1;
        }
    }
    /* Exactly one slot must have been marked. */
    int marked = 0;
    for (int i = 0; i < NUM_THREADS; i++)
    {
        if (a[i] == 1)
        {
            marked++;
        }
    }
    if (marked != 1)
    {
        printf("Incorrect number: %d, expected 1\n", marked);
        errors++;
    }
    printf("Cluster master core exit\n");
}
/* Fabric-controller entry: opens the cluster, offloads cluster_delegate()
 * to it, then reports and propagates the accumulated error count. */
void helloworld(void)
{
    printf("Entering main controller\n");
    /* NOTE(review): core_id / cluster_id are queried but never used —
     * presumably kept for debugging; confirm before removing. */
    uint32_t core_id = pi_core_id(), cluster_id = pi_cluster_id();
    struct pi_device cluster_dev = {0};
    struct pi_cluster_conf cl_conf = {0};
    /* Init cluster configuration structure. */
    pi_cluster_conf_init(&cl_conf);
    cl_conf.id = 0; /* Set cluster ID. */
    /* Configure & open cluster. */
    pi_open_from_conf(&cluster_dev, &cl_conf);
    if (pi_cluster_open(&cluster_dev))
    {
        printf("Cluster open failed !\n");
        pmsis_exit(-1);
    }
    /* Prepare cluster task and send it to cluster. */
    struct pi_cluster_task cl_task = {0};
    cl_task.entry = cluster_delegate;
    cl_task.arg = NULL;
    /* Dispatch the task; cluster_delegate updates the file-scope `errors`. */
    pi_cluster_send_task_to_cl(&cluster_dev, &cl_task);
    pi_cluster_close(&cluster_dev);
    if (errors)
    {
        printf("Test failed with %d errors\n", errors);
    }
    else
    {
        printf("Test success !\n");
    }
    /* The exit code doubles as the error count. */
    pmsis_exit(errors);
}
/* Program Entry. */
int main(void)
{
    printf("\n\n\t *** PMSIS HelloWorld ***\n\n");
    /* Hand control to the PMSIS runtime, which runs helloworld(). */
    return pmsis_kickoff((void *) helloworld);
}
|
dgemvT_save.c | #define max(a,b) (((a) < (b))? (b) : (a))
#define min(a,b) (((a) < (b))? (a) : (b))
#define _TH_1 2
#include <omp.h>
/*
 * Machine-generated, tiled and 4-way-unrolled transposed matrix-vector
 * product: Y <- beta*Y + A^T*X over 256-row outer blocks and 32x32 tiles,
 * with the outer block loop distributed across _TH_1 OpenMP threads.
 *
 * NOTE(review): `alpha` is accepted but never referenced in the body, and
 * `incX`/`incY` are likewise ignored (unit stride assumed); `beta` is only
 * applied on the very first j-block, i.e. when N > 31 -- confirm against the
 * intended BLAS dgemv semantics before reuse. The "@;BEGIN(...)" comments
 * are annotations left by the code generator; do not remove.
 */
void dgemvT(const int M,const int N,const double alpha,const double* A,const int lda,const double* X,const int incX,const double beta,double* Y,const int incY) {
int i;
int j;
int i_bk_1;
int i_bk_2;
int j_bk_3;
omp_set_num_threads(_TH_1);
#pragma omp parallel
{
/* Outer 256-row blocks of Y, one block per loop iteration, split over threads. */
/*@;BEGIN(nest1_group3=Nest)@*/#pragma omp for private(i,j,i_bk_1,i_bk_2,j_bk_3)
for (i_bk_1=0; i_bk_1<M; i_bk_1+=256)
{
/* Full 32-row tiles inside the current 256-row block. */
/*@;BEGIN(nest1_group2=Nest)@*/for (i_bk_2=0; i_bk_2<-31+min(256,M-i_bk_1); i_bk_2+=32)
{
/* First j-block (j_bk_3 == 0): also applies the beta scaling to Y. */
if ((j_bk_3=0)<-31+N)
{
for (i=0; i<32; i+=4)
{
j = 0;
{
Y[i_bk_1+(i_bk_2+i)] = beta*Y[i_bk_1+(i_bk_2+i)];
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[j+(j_bk_3+(i_bk_1*lda+(i_bk_2*lda+i*lda)))]*X[j_bk_3+j];
Y[i_bk_1+(i_bk_2+(1+i))] = beta*Y[i_bk_1+(i_bk_2+(1+i))];
Y[i_bk_1+(i_bk_2+(1+i))] = Y[i_bk_1+(i_bk_2+(1+i))]+A[j+(j_bk_3+(i_bk_1*lda+(i_bk_2*lda+(lda+i*lda))))]*X[j_bk_3+j];
Y[i_bk_1+(i_bk_2+(2+i))] = beta*Y[i_bk_1+(i_bk_2+(2+i))];
Y[i_bk_1+(i_bk_2+(2+i))] = Y[i_bk_1+(i_bk_2+(2+i))]+A[j+(j_bk_3+(i_bk_1*lda+(i_bk_2*lda+(2*lda+i*lda))))]*X[j_bk_3+j];
Y[i_bk_1+(i_bk_2+(3+i))] = beta*Y[i_bk_1+(i_bk_2+(3+i))];
Y[i_bk_1+(i_bk_2+(3+i))] = Y[i_bk_1+(i_bk_2+(3+i))]+A[j+(j_bk_3+(i_bk_1*lda+(i_bk_2*lda+(3*lda+i*lda))))]*X[j_bk_3+j];
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[i*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(1+j))))]*X[j_bk_3+(1+j)];
Y[i_bk_1+(i_bk_2+(1+i))] = Y[i_bk_1+(i_bk_2+(1+i))]+A[i*lda+(lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(1+j)))))]*X[j_bk_3+(1+j)];
Y[i_bk_1+(i_bk_2+(2+i))] = Y[i_bk_1+(i_bk_2+(2+i))]+A[i*lda+(2*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(1+j)))))]*X[j_bk_3+(1+j)];
Y[i_bk_1+(i_bk_2+(3+i))] = Y[i_bk_1+(i_bk_2+(3+i))]+A[i*lda+(3*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(1+j)))))]*X[j_bk_3+(1+j)];
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[i*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(2+j))))]*X[j_bk_3+(2+j)];
Y[i_bk_1+(i_bk_2+(1+i))] = Y[i_bk_1+(i_bk_2+(1+i))]+A[i*lda+(lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(2+j)))))]*X[j_bk_3+(2+j)];
Y[i_bk_1+(i_bk_2+(2+i))] = Y[i_bk_1+(i_bk_2+(2+i))]+A[i*lda+(2*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(2+j)))))]*X[j_bk_3+(2+j)];
Y[i_bk_1+(i_bk_2+(3+i))] = Y[i_bk_1+(i_bk_2+(3+i))]+A[i*lda+(3*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(2+j)))))]*X[j_bk_3+(2+j)];
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[i*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(3+j))))]*X[j_bk_3+(3+j)];
Y[i_bk_1+(i_bk_2+(1+i))] = Y[i_bk_1+(i_bk_2+(1+i))]+A[i*lda+(lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(3+j)))))]*X[j_bk_3+(3+j)];
Y[i_bk_1+(i_bk_2+(2+i))] = Y[i_bk_1+(i_bk_2+(2+i))]+A[i*lda+(2*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(3+j)))))]*X[j_bk_3+(3+j)];
Y[i_bk_1+(i_bk_2+(3+i))] = Y[i_bk_1+(i_bk_2+(3+i))]+A[i*lda+(3*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(3+j)))))]*X[j_bk_3+(3+j)];
}
/* Remaining j of the first 32-wide block (beta already applied above). */
for (j=4; j<32; j+=4)
{
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[j+(j_bk_3+(i_bk_1*lda+(i_bk_2*lda+i*lda)))]*X[j_bk_3+j];
Y[i_bk_1+(i_bk_2+(1+i))] = Y[i_bk_1+(i_bk_2+(1+i))]+A[j+(j_bk_3+(i_bk_1*lda+(i_bk_2*lda+(lda+i*lda))))]*X[j_bk_3+j];
Y[i_bk_1+(i_bk_2+(2+i))] = Y[i_bk_1+(i_bk_2+(2+i))]+A[j+(j_bk_3+(i_bk_1*lda+(i_bk_2*lda+(2*lda+i*lda))))]*X[j_bk_3+j];
Y[i_bk_1+(i_bk_2+(3+i))] = Y[i_bk_1+(i_bk_2+(3+i))]+A[j+(j_bk_3+(i_bk_1*lda+(i_bk_2*lda+(3*lda+i*lda))))]*X[j_bk_3+j];
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[i*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(1+j))))]*X[j_bk_3+(1+j)];
Y[i_bk_1+(i_bk_2+(1+i))] = Y[i_bk_1+(i_bk_2+(1+i))]+A[i*lda+(lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(1+j)))))]*X[j_bk_3+(1+j)];
Y[i_bk_1+(i_bk_2+(2+i))] = Y[i_bk_1+(i_bk_2+(2+i))]+A[i*lda+(2*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(1+j)))))]*X[j_bk_3+(1+j)];
Y[i_bk_1+(i_bk_2+(3+i))] = Y[i_bk_1+(i_bk_2+(3+i))]+A[i*lda+(3*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(1+j)))))]*X[j_bk_3+(1+j)];
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[i*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(2+j))))]*X[j_bk_3+(2+j)];
Y[i_bk_1+(i_bk_2+(1+i))] = Y[i_bk_1+(i_bk_2+(1+i))]+A[i*lda+(lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(2+j)))))]*X[j_bk_3+(2+j)];
Y[i_bk_1+(i_bk_2+(2+i))] = Y[i_bk_1+(i_bk_2+(2+i))]+A[i*lda+(2*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(2+j)))))]*X[j_bk_3+(2+j)];
Y[i_bk_1+(i_bk_2+(3+i))] = Y[i_bk_1+(i_bk_2+(3+i))]+A[i*lda+(3*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(2+j)))))]*X[j_bk_3+(2+j)];
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[i*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(3+j))))]*X[j_bk_3+(3+j)];
Y[i_bk_1+(i_bk_2+(1+i))] = Y[i_bk_1+(i_bk_2+(1+i))]+A[i*lda+(lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(3+j)))))]*X[j_bk_3+(3+j)];
Y[i_bk_1+(i_bk_2+(2+i))] = Y[i_bk_1+(i_bk_2+(2+i))]+A[i*lda+(2*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(3+j)))))]*X[j_bk_3+(3+j)];
Y[i_bk_1+(i_bk_2+(3+i))] = Y[i_bk_1+(i_bk_2+(3+i))]+A[i*lda+(3*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(3+j)))))]*X[j_bk_3+(3+j)];
}
}
}
/* Middle j-blocks: full 32-wide column tiles, accumulation only. */
/*@;BEGIN(nest2_group2=Nest)@*/for (j_bk_3=32; j_bk_3<-31+N; j_bk_3+=32)
{
/*@;BEGIN(nest1=Nest)@*/for (i=0; i<32; i+=4)
{
/*@;BEGIN(nest2=Nest)@*/for (j=0; j<32; j+=4)
{
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[j+(j_bk_3+(i_bk_1*lda+(i_bk_2*lda+i*lda)))]*X[j_bk_3+j];
Y[i_bk_1+(i_bk_2+(1+i))] = Y[i_bk_1+(i_bk_2+(1+i))]+A[j+(j_bk_3+(i_bk_1*lda+(i_bk_2*lda+(lda+i*lda))))]*X[j_bk_3+j];
Y[i_bk_1+(i_bk_2+(2+i))] = Y[i_bk_1+(i_bk_2+(2+i))]+A[j+(j_bk_3+(i_bk_1*lda+(i_bk_2*lda+(2*lda+i*lda))))]*X[j_bk_3+j];
Y[i_bk_1+(i_bk_2+(3+i))] = Y[i_bk_1+(i_bk_2+(3+i))]+A[j+(j_bk_3+(i_bk_1*lda+(i_bk_2*lda+(3*lda+i*lda))))]*X[j_bk_3+j];
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[i*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(1+j))))]*X[j_bk_3+(1+j)];
Y[i_bk_1+(i_bk_2+(1+i))] = Y[i_bk_1+(i_bk_2+(1+i))]+A[i*lda+(lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(1+j)))))]*X[j_bk_3+(1+j)];
Y[i_bk_1+(i_bk_2+(2+i))] = Y[i_bk_1+(i_bk_2+(2+i))]+A[i*lda+(2*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(1+j)))))]*X[j_bk_3+(1+j)];
Y[i_bk_1+(i_bk_2+(3+i))] = Y[i_bk_1+(i_bk_2+(3+i))]+A[i*lda+(3*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(1+j)))))]*X[j_bk_3+(1+j)];
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[i*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(2+j))))]*X[j_bk_3+(2+j)];
Y[i_bk_1+(i_bk_2+(1+i))] = Y[i_bk_1+(i_bk_2+(1+i))]+A[i*lda+(lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(2+j)))))]*X[j_bk_3+(2+j)];
Y[i_bk_1+(i_bk_2+(2+i))] = Y[i_bk_1+(i_bk_2+(2+i))]+A[i*lda+(2*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(2+j)))))]*X[j_bk_3+(2+j)];
Y[i_bk_1+(i_bk_2+(3+i))] = Y[i_bk_1+(i_bk_2+(3+i))]+A[i*lda+(3*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(2+j)))))]*X[j_bk_3+(2+j)];
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[i*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(3+j))))]*X[j_bk_3+(3+j)];
Y[i_bk_1+(i_bk_2+(1+i))] = Y[i_bk_1+(i_bk_2+(1+i))]+A[i*lda+(lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(3+j)))))]*X[j_bk_3+(3+j)];
Y[i_bk_1+(i_bk_2+(2+i))] = Y[i_bk_1+(i_bk_2+(2+i))]+A[i*lda+(2*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(3+j)))))]*X[j_bk_3+(3+j)];
Y[i_bk_1+(i_bk_2+(3+i))] = Y[i_bk_1+(i_bk_2+(3+i))]+A[i*lda+(3*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(3+j)))))]*X[j_bk_3+(3+j)];
}
}
}
/* Column remainder: N not a multiple of 32 (scalar j loop). */
if (j_bk_3<N)
{
for (i=0; i<32; i+=4)
{
for (j=0; j<N-j_bk_3; j+=1)
{
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[j+(j_bk_3+(i_bk_1*lda+(i_bk_2*lda+i*lda)))]*X[j_bk_3+j];
Y[i_bk_1+(i_bk_2+(1+i))] = Y[i_bk_1+(i_bk_2+(1+i))]+A[j+(j_bk_3+(i_bk_1*lda+(i_bk_2*lda+(lda+i*lda))))]*X[j_bk_3+j];
Y[i_bk_1+(i_bk_2+(2+i))] = Y[i_bk_1+(i_bk_2+(2+i))]+A[j+(j_bk_3+(i_bk_1*lda+(i_bk_2*lda+(2*lda+i*lda))))]*X[j_bk_3+j];
Y[i_bk_1+(i_bk_2+(3+i))] = Y[i_bk_1+(i_bk_2+(3+i))]+A[j+(j_bk_3+(i_bk_1*lda+(i_bk_2*lda+(3*lda+i*lda))))]*X[j_bk_3+j];
}
}
}
}
/* Row remainder: rows left over after the full 32-row tiles (scalar i loop). */
if (i_bk_2<min(256,M-i_bk_1))
{
if ((j_bk_3=0)<-31+N)
{
for (i=0; i<min(256-i_bk_2,-i_bk_2+(M-i_bk_1)); i+=1)
{
j = 0;
{
Y[i_bk_1+(i_bk_2+i)] = beta*Y[i_bk_1+(i_bk_2+i)];
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[j+(j_bk_3+(i_bk_1*lda+(i_bk_2*lda+i*lda)))]*X[j_bk_3+j];
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[i*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(1+j))))]*X[j_bk_3+(1+j)];
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[i*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(2+j))))]*X[j_bk_3+(2+j)];
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[i*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(3+j))))]*X[j_bk_3+(3+j)];
}
for (j=4; j<32; j+=4)
{
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[j+(j_bk_3+(i_bk_1*lda+(i_bk_2*lda+i*lda)))]*X[j_bk_3+j];
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[i*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(1+j))))]*X[j_bk_3+(1+j)];
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[i*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(2+j))))]*X[j_bk_3+(2+j)];
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[i*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(3+j))))]*X[j_bk_3+(3+j)];
}
}
}
for (j_bk_3=32; j_bk_3<-31+N; j_bk_3+=32)
{
for (i=0; i<min(256-i_bk_2,-i_bk_2+(M-i_bk_1)); i+=1)
{
for (j=0; j<32; j+=4)
{
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[j+(j_bk_3+(i_bk_1*lda+(i_bk_2*lda+i*lda)))]*X[j_bk_3+j];
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[i*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(1+j))))]*X[j_bk_3+(1+j)];
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[i*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(2+j))))]*X[j_bk_3+(2+j)];
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[i*lda+(i_bk_2*lda+(i_bk_1*lda+(j_bk_3+(3+j))))]*X[j_bk_3+(3+j)];
}
}
}
if (j_bk_3<N)
{
for (i=0; i<min(256-i_bk_2,-i_bk_2+(M-i_bk_1)); i+=1)
{
for (j=0; j<N-j_bk_3; j+=1)
{
Y[i_bk_1+(i_bk_2+i)] = Y[i_bk_1+(i_bk_2+i)]+A[j+(j_bk_3+(i_bk_1*lda+(i_bk_2*lda+i*lda)))]*X[j_bk_3+j];
}
}
}
}
}
}
}
|
k2.c | #include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
#include <unistd.h>
#include <omp.h>
#include <time.h>
#include "list.h"
#include "matrix.h"
#include "bnet.h"
#include "rand.h"
#define MPI 0            /* 1: build with MPI rank merging of results */
#define VERBOSE 0        /* 1: print per-candidate K2 scores */
#define SAVE_NETWORKS 0  /* 1: also dump every learned network/topology */
#if MPI
#include <mpi.h>
#endif
/* Bayesian Dirichlet family score for one node: builds the LU and LV terms
 * from lgamma of (counts + prior) and the column-summed pseudo-counts, and
 * returns sum(LU + LV). Frees every intermediate matrix it creates. */
double dirichlet_score_family(Matrix *counts, CPD *cpd) {
    Matrix *sizes = cpd->sizes;
    Matrix *prior = cpd->dirichlet;

    /* Last entry of the size vector = arity of the child node itself. */
    Matrix *self_view = matrix_sub_indices(sizes, sizes->rows - 1, sizes->rows, 0, sizes->cols);
    int self_arity = *(int *) matrix_element_by_index(self_view, 0);
    matrix_scrap(self_view);

    /* LU = column-sums of lgamma(counts + prior) - lgamma(prior). */
    Matrix *counts_plus_prior = matrix_add_int_double(counts, prior);
    Matrix *lg_cpp = matrix_lgamma(counts_plus_prior);
    Matrix *lg_prior = matrix_lgamma(prior);
    matrix_delete(counts_plus_prior);
    Matrix *lg_diff = matrix_double_subtract(lg_cpp, lg_prior);
    matrix_delete(lg_cpp);
    matrix_delete(lg_prior);
    Matrix *LU = matrix_double_sum_n_cols(lg_diff, self_arity);
    matrix_delete(lg_diff);

    /* LV = lgamma(alpha_ij) - lgamma(alpha_ij + N_ij). */
    Matrix *alpha_ij = matrix_double_sum_n_cols(prior, self_arity);
    Matrix *N_ij = matrix_sum_n_cols(counts, self_arity);
    Matrix *lg_alpha = matrix_lgamma(alpha_ij);
    Matrix *alpha_plus_N = matrix_add_int_double(N_ij, alpha_ij);
    matrix_delete(N_ij);
    matrix_delete(alpha_ij);
    Matrix *lg_alpha_N = matrix_lgamma(alpha_plus_N);
    matrix_delete(alpha_plus_N);
    Matrix *LV = matrix_double_subtract(lg_alpha, lg_alpha_N);
    matrix_delete(lg_alpha);
    matrix_delete(lg_alpha_N);

    /* Final score = sum of the elementwise total. */
    Matrix *total = matrix_double_add(LU, LV);
    matrix_delete(LU);
    matrix_delete(LV);
    double score = matrix_double_sum(total);
    matrix_delete(total);
    return score;
}
/* Mixed-radix linear index of one sample column: entry k (1-based value)
 * contributes (value-1) * product of the arities of entries 0..k-1. */
int count_index(Matrix *sz, Matrix *sample_data, int col) {
    Matrix *column = matrix_sub_col(sample_data, col);
    int n = column->rows * column->cols;
    int **vals = (int **) column->data;   /* data is an array of int* */
    int **dims = (int **) sz->data;
    int index = 0;
    int stride = 1;
    for (int k = 0; k < n; ++k) {
        assert((*vals[k]) - 1 < *dims[k]);
        index += ((*vals[k]) - 1) * stride;
        stride *= *dims[k];
    }
    matrix_scrap(column);
    return index;
}
/* Histogram of joint configurations: one counter per configuration of the
 * variables in `sz`, incremented once per sample column of `data`. */
Matrix *compute_counts(Matrix *data, Matrix *sz) {
    assert(sz->rows == data->rows);
    Matrix *counts = matrix_zeros(matrix_prod(sz), 1);
    for (int col = 0; col < data->cols; ++col) {
        int idx = count_index(sz, data, col);
        ++*(int *) matrix_element_by_index(counts, idx);
    }
    return counts;
}
/* Log marginal likelihood of one node given its parents' evidence:
 * stack parent rows above the node's own row, count joint configurations,
 * and score them with the Dirichlet family score. */
double log_marg_prob_node(CPD *cpd, Matrix *self_ev, Matrix *pev) {
    assert(self_ev->rows == 1);
    assert(cpd->sizes->cols == 1);
    assert(pev->cols == self_ev->cols);

    Matrix *family_data = matrix_sub_concat_rows(pev, self_ev, false);
    Matrix *family_counts = compute_counts(family_data, cpd->sizes);
    matrix_scrap(family_data);

    double result = dirichlet_score_family(family_counts, cpd);
    matrix_delete(family_counts);
    return result;
}
/* For every sample column, look up the CPT probability of that column's
 * joint (parents + self) configuration; returns a 1-row double matrix. */
Matrix *prob_node(CPD *cpd, Matrix *self_ev, Matrix *pev) {
    Matrix *samples = matrix_sub_concat_rows(pev, self_ev, false);
    Matrix *probs = matrix_double_zeros(samples->rows, samples->cols);
    int **dims = (int **) cpd->sizes->data;
    for (int col = 0; col < samples->cols; ++col) {
        Matrix *column = matrix_sub_col(samples, col);
        int **vals = (int **) column->data;
        int n = column->rows * column->cols;
        /* Mixed-radix index, same scheme as count_index(). */
        int idx = 0;
        int stride = 1;
        for (int k = 0; k < n; ++k) {
            assert((*vals[k]) - 1 < *dims[k]);
            idx += ((*vals[k]) - 1) * stride;
            stride *= *dims[k];
        }
        matrix_scrap(column);
        *(double *) matrix_element_by_index(probs, col) =
            *(double *) matrix_element_by_index(cpd->cpt, idx);
    }
    matrix_scrap(samples);
    return probs;
}
/* Sum of log-probabilities of each sample column under the node's CPT.
 * NOTE(review): when a probability is <= 0 the code adds DBL_MIN (the
 * smallest positive normalized double, ~2.2e-308) instead of a large
 * negative penalty such as log(DBL_MIN); zero-probability samples are
 * therefore almost free in the BIC score -- confirm this is intended. */
double log_prob_node(CPD *cpd, Matrix *self_ev, Matrix *pev) {
double score = 0;
Matrix *p = prob_node(cpd, self_ev, pev);
/* p->data is an array of double*; walk it pointer by pointer. */
double **data = (double **) p->data;
for (int i = 0; i < p->rows * p->cols; ++i, ++data) {
double d = **data;
score += d <= 0 ? DBL_MIN : log(d);
}
matrix_delete(p);
return score;
}
/* Build a tabular CPD for node `self` in `dag`: sizes = arities of
 * (parents..., self); Dirichlet prior = uniform BDeu-style pseudo-count
 * 1/(prod(parent arities) * arity(self)); CPT left unset (NULL). */
CPD *tabular_CPD(Matrix *dag, Matrix *ns, int self) {
    CPD *cpd = malloc(sizeof(CPD));

    /* Family = parents(self) followed by self itself. */
    List *family = adjacency_matrix_parents(dag, self);
    list_push_int(family, self);

    /* Arity of every family member, in family order. */
    Matrix *family_sizes = matrix_zeros(family->count, 1);
    for (int k = 0; k < family->count; ++k) {
        int node = list_get_int(family, k);
        *(int *) matrix_element_by_index(family_sizes, k) =
            *(int *) matrix_element_by_index(ns, node);
    }
    cpd->sizes = family_sizes;

    /* Product of the parent arities only (exclude self, the last entry). */
    Matrix *parent_sizes = matrix_sub_indices(family_sizes, 0, family->count - 1, 0, 1);
    int parent_prod = matrix_prod(parent_sizes);
    list_delete(family);
    matrix_scrap(parent_sizes);

    double pseudo_count = (1.0 / parent_prod) * (1.0 / *(int *) matrix_element_by_index(ns, self));
    cpd->dirichlet = matrix_double_create(matrix_prod(family_sizes), 1, pseudo_count);
    cpd->cpt = NULL;
    return cpd;
}
/* Score node j with candidate parent set `ps` using either the Bayesian
 * (Dirichlet marginal likelihood) or BIC scoring function. Builds a
 * throwaway DAG containing just the ps->j edges to derive the CPD. */
double score_family(int j, List *ps, Matrix *ns, List *discrete, Matrix *data, char *scoring_fn) {
Matrix *dag = matrix_zeros(data->rows, data->rows);
if (ps->count > 0) {
Matrix *dag_sub = matrix_sub_list_index(dag, ps, j, j + 1);
matrix_set(dag_sub, 1);
matrix_scrap(dag_sub);
//TODO: sort `ps` here.
}
/* Row j = node evidence; rows in ps = parent evidence. */
Matrix *data_sub_1 = matrix_sub_indices(data, j, j + 1, 0, data->cols),
*data_sub_2 = matrix_sub_list_index(data, ps, 0, data->cols);
CPD *cpd = tabular_CPD(dag, ns, j);
double score;
if (!strcmp(scoring_fn, "bayesian")) {
score = log_marg_prob_node(cpd, data_sub_1, data_sub_2);
} else if (!strcmp(scoring_fn, "bic")) {
/* BIC: fit a stochastic CPT from smoothed counts, then penalize by
 * 0.5 * (number of free parameters) * log(sample count). */
List *fam = list_slice(ps, 0, ps->count);
int a_index = list_push_int(fam, j);
Matrix *data_sub_3 = matrix_sub_list_index(data, fam, 0, data->cols);
Matrix *counts = compute_counts(data_sub_3, cpd->sizes);
matrix_scrap(data_sub_3);
cpd->cpt = matrix_add_int_double(counts, cpd->dirichlet);
matrix_delete(counts);
matrix_double_mk_stochastic(cpd->cpt, ns);
double L = log_prob_node(cpd, data_sub_1, data_sub_2);
Matrix *sz = cpd->sizes;
/* Temporarily decrement the child's arity so matrix_prod(sz) counts
 * (arity-1) free parameters per parent configuration; restored below. */
int *last = (int *) sz->data[sz->rows * sz->cols - 1];
--*last;
score = L - 0.5 * matrix_prod(sz) * log(data->cols);
++*last;
free(list_remove(fam, a_index));
list_scrap(fam);
} else {
/* Unknown scoring function name. */
assert(1 == 2);
}
cpd_delete(cpd);
matrix_scrap(data_sub_1);
matrix_scrap(data_sub_2);
matrix_delete(dag);
return score;
}
/* K2 structure learning: visit nodes in `order`; for each node, greedily add
 * the single preceding node that most improves the family score, stopping
 * when no candidate improves it or the fan-in bound is reached.
 * NOTE(review): the loop guard `ps->count <= max_fan_in` permits one final
 * addition when the count already equals max_fan_in, so a node can end up
 * with max_fan_in + 1 parents -- confirm whether `<` was intended. */
Matrix *learn_struct_K2(Matrix *data, Matrix *ns, List *order, char *scoring_fn, int max_parents) {
assert(order->count == data->rows);
int n = data->rows;
/* max_parents == 0 means "unbounded" (bounded only by n). */
int max_fan_in = max_parents == 0 ? n : max_parents;
List *discrete = list_empty();
for (int i = 0; i < n; ++i) list_push_int(discrete, i);
Matrix *dag = matrix_zeros(n, n);
int parent_order = 0;
for (int i = 0; i < n; ++i) {
List *ps = list_empty();
int j = list_get_int(order, i);
/* Baseline: score of j with no parents. */
double score = score_family(j, ps, ns, discrete, data, scoring_fn);
#if VERBOSE
printf("\nnode %d, empty score %6.4f\n", j, score);
#endif
for (; ps->count <= max_fan_in;) {
/* Candidates = nodes earlier in the ordering not already parents. */
List *order_sub = list_slice(order, 0, i);
List *pps = list_difference_type_int(order_sub, ps);
list_scrap(order_sub);
int nps = pps->count;
Matrix *pscore = matrix_double_zeros(1, nps);
for (int pi = 0; pi < nps; ++pi) {
int p = list_get_int(pps, pi);
/* Tentatively add p, score, then remove it again. */
int n_index = list_push_int(ps, p);
*((double *) matrix_element_by_index(pscore, pi)) = score_family(j, ps, ns, discrete, data, scoring_fn);
#if VERBOSE
printf("considering adding %d to %d, score %6.4f\n", p, j, *((double *) matrix_element_by_index(pscore, pi)));
#endif
free(list_remove(ps, n_index));
}
double best_pscore = -DBL_MAX;
int best_p = -1;
/* NOTE(review): this inner `i` shadows the outer node-loop `i`. */
for (int i = 0; i < nps; ++i) {
double d = *(double *) matrix_element_by_index(pscore, i);
if (d > best_pscore) {
best_pscore = d;
best_p = i;
}
}
matrix_delete(pscore);
if (best_p == -1) {
list_scrap(pps);
break;
}
best_p = list_get_int(pps, best_p);
list_scrap(pps);
/* Keep the best candidate only if it strictly improves the score. */
if (best_pscore > score) {
score = best_pscore;
list_push_int(ps, best_p);
#if VERBOSE
printf("* adding %d to %d, score %6.4f\n", best_p, j, best_pscore);
#endif
} else {
break;
}
}
/* Record the accepted parents; edges are tagged with the order in
 * which nodes acquired parents (1, 2, ...), not simply 1. */
if (ps->count > 0) {
Matrix *dag_sub = matrix_sub_list_index(dag, ps, j, j + 1);
matrix_set(dag_sub, ++parent_order);
matrix_scrap(dag_sub);
}
list_delete(ps);
}
list_delete(discrete);
return dag;
}
#if MPI
#define MPI_TAG_MATRIX_R 1
#define MPI_TAG_MATRIX_C 2
#define MPI_TAG_MATRIX_D 3
/* Send an int Matrix: dimensions first (two tagged messages), then the
 * cell values flattened into a contiguous buffer. */
void MPI_Matrix_Send(int to_index, Matrix *matrix) {
    int rows = matrix->rows, cols = matrix->cols;
    int n = rows * cols;
    int **cell = (int **) matrix->data;   /* data is an array of int* */
    int *flat = malloc(n * sizeof(int));
    for (int i = 0; i < n; ++i)
        flat[i] = *cell[i];
    MPI_Send(&rows, 1, MPI_INT, to_index, MPI_TAG_MATRIX_R, MPI_COMM_WORLD);
    MPI_Send(&cols, 1, MPI_INT, to_index, MPI_TAG_MATRIX_C, MPI_COMM_WORLD);
    MPI_Send(flat, n, MPI_INT, to_index, MPI_TAG_MATRIX_D, MPI_COMM_WORLD);
    free(flat);
}
/* Receive an int Matrix produced by MPI_Matrix_Send: dimensions first,
 * then the flattened values, scattered back into the cell pointers. */
Matrix *MPI_Matrix_Recv(int from_index) {
    int rows, cols;
    MPI_Recv(&rows, 1, MPI_INT, from_index, MPI_TAG_MATRIX_R, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    MPI_Recv(&cols, 1, MPI_INT, from_index, MPI_TAG_MATRIX_C, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    int n = rows * cols;
    Matrix *matrix = matrix_zeros(rows, cols);
    int *flat = malloc(n * sizeof(int));
    MPI_Recv(flat, n, MPI_INT, from_index, MPI_TAG_MATRIX_D, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    int **cell = (int **) matrix->data;
    for (int i = 0; i < n; ++i)
        *cell[i] = flat[i];
    free(flat);
    return matrix;
}
#endif
/* Driver: load data, generate `topologies` random node orderings per gene,
 * run K2 on each ordering in parallel, accumulate a consensus network, and
 * (rank 0) write the results. Returns 0 on success. */
int exec(int forkIndex, int forkSize, bool data_transposed, char *f_data, int topologies, char *f_output, char *scoring_fn, int max_parents) {
Matrix *data = matrix_from_file(f_data, data_transposed), *sz = matrix_create_sz(data);
#if MPI
/* Split the workload: each rank takes topologies/forkSize orderings,
 * the first top_r ranks take one extra. */
assert(forkIndex > -1);
assert(forkSize > 0);
int top_d = topologies / forkSize, top_r = topologies % forkSize;
if (forkIndex < top_r) ++top_d;
topologies = top_d;
#endif
Matrix *orders = matrix_zeros(data->rows * topologies, data->rows);
#if SAVE_NETWORKS
Matrix *networks = matrix_zeros(data->rows * topologies, data->rows * data->rows);
#endif
/* Build one ordering per row: the "start" gene is pinned first, the rest
 * are shuffled. NOTE(review): shuffle_int presumably relies on rand(),
 * which is not guaranteed thread-safe inside this parallel loop --
 * verify; at worst the orderings are just less random than intended. */
#pragma omp parallel for
for (int r = 0; r < orders->rows; ++r) {
int start = r / topologies;
int *arr = malloc(orders->cols * sizeof(int));
arr[0] = start;
for (int i = 1; i < orders->cols; ++i) {
arr[i] = i == start ? 0 : i;
}
shuffle_int(orders->cols - 1, arr + 1);
for (int c = 0; c < orders->cols; ++c) {
*(int *) matrix_element(orders, r, c) = arr[c];
}
free(arr);
}
Matrix *consensus_network = matrix_zeros(data->rows, data->rows);
int cn_n_elements = consensus_network->rows * consensus_network->cols;
/* One K2 run per ordering; the consensus accumulation is serialized. */
#pragma omp parallel for
for (int o = 0; o < orders->rows; ++o) {
Matrix *m_order = matrix_sub_indices(orders, o, o + 1, 0, orders->cols);
List *order = matrix_to_list(m_order);
Matrix *bnet = learn_struct_K2(data, sz, order, scoring_fn, max_parents);
assert(consensus_network->rows == bnet->rows);
assert(consensus_network->cols == bnet->cols);
/* The critical region guards the shared consensus counters. */
#pragma omp critical
for (int i = 0; i < cn_n_elements; ++i) {
*(int *) matrix_element_by_index(consensus_network, i) += *(int *) matrix_element_by_index(bnet, i) ? 1 : 0;
}
#if SAVE_NETWORKS
/* Disjoint row range per iteration: no synchronization needed here. */
for (int i = 0; i < cn_n_elements; ++i) {
*(int *) matrix_element_by_index(networks, i + cn_n_elements * o) = *(int *) matrix_element_by_index(bnet, i);
}
#endif
matrix_delete(bnet);
list_delete(order);
matrix_scrap(m_order);
}
matrix_delete(sz);
matrix_delete(data);
#if MPI
//TODO: merge and write topologies
/* Rank 0 gathers and merges every other rank's results. */
if (forkIndex == 0) {
for (int i = 1; i < forkSize; ++i) {
Matrix *merge = MPI_Matrix_Recv(i);
matrix_add_in(consensus_network, merge);
matrix_delete(merge);
}
#if SAVE_NETWORKS
for (int i = 1; i < forkSize; ++i) {
Matrix *merge = MPI_Matrix_Recv(i), *old = orders;
orders = matrix_sub_concat_rows(orders, merge, true);
matrix_scrap(old);
matrix_scrap(merge);
}
for (int i = 1; i < forkSize; ++i) {
Matrix *merge = MPI_Matrix_Recv(i), *old = networks;
networks = matrix_sub_concat_rows(networks, merge, true);
matrix_scrap(old);
matrix_scrap(merge);
}
#endif
} else {
MPI_Matrix_Send(0, consensus_network);
#if SAVE_NETWORKS
MPI_Matrix_Send(0, orders);
MPI_Matrix_Send(0, networks);
#endif
}
#endif
/* Only rank 0 writes output files. */
if (forkIndex == 0) {
matrix_to_file(consensus_network, f_output);
#if SAVE_NETWORKS
matrix_to_file(networks, "networks.csv");
matrix_to_file(orders, "topologies.csv");
#endif
}
#if SAVE_NETWORKS
matrix_delete(networks);
#endif
matrix_delete(orders);
matrix_delete(consensus_network);
return 0;
}
/* Entry point: parse command-line options, seed the RNG (per rank when MPI
 * is enabled), configure OpenMP, and run exec(). */
int main(int argc, char **argv) {
    int forkIndex = 0, forkSize = 1;
#if MPI
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &forkIndex);
    MPI_Comm_size(MPI_COMM_WORLD, &forkSize);
#endif
    srand(time(NULL) ^ forkIndex);

    /* Defaults. */
    int threads = 1, topologies = 1, max_parents = 0;
    bool data_transposed = false;
    char *data = NULL;
    char *output = "consensus.csv";
    char *scoring_fn = "bayesian";

    int opt;
    while ((opt = getopt(argc, argv, "Thp:d:t:o:s:m:")) != -1) {
        switch (opt) {
        case 'T':
            data_transposed = true;
            break;
        case 'p':
            threads = atoi(optarg);
            assert(threads > 0);
            assert(threads <= omp_get_num_procs());
            break;
        case 'm':
            max_parents = atoi(optarg);
            assert(max_parents >= 0);
            break;
        case 'd':
            data = optarg;
            break;
        case 't':
            topologies = atoi(optarg);
            break;
        case 'o':
            output = optarg;
            break;
        case 's':
            scoring_fn = optarg;
            break;
        case 'h':
        default:
            puts(": -p <num_threads> -d <data file> -t <topologies per gene> -o <output file> -m <max parents>");
            puts("~ -T (reads matrix transposed)");
            return 1;
        }
    }
    if (data == NULL) {
        puts("You must send a data file using -d <file name>.");
        return 1;
    }
    omp_set_num_threads(threads);
    int status = exec(forkIndex, forkSize, data_transposed, data, topologies, output, scoring_fn, max_parents);
#if MPI
    MPI_Finalize();
#endif
    return status;
}
|
genprimes.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
// Main func.
/* Sieve-style prime generator: fills primes[] with 2..range, zeroes
 * composites in parallel, then writes "rank, prime, gap" lines to
 * <range>.txt. Usage: genprimes <range> <nb_threads>.
 * NOTE(review): `range` comes from unvalidated atoi() input and sizes a
 * VLA, so a huge or non-numeric argument overflows the stack; also
 * `nb_threads` is parsed but the parallel region relies on it directly.
 */
int main(int argc, char * argv[]) {
// Check arguments
if (argc != 3) {
printf("Incorrect number of arguments passed. 3 needed. %d passed.\n", argc);
exit(1);
}
// Declare (and initialize) vars.
// Initialize upper bound for prime number search space and total threads requested
int range = atoi(argv[1]);
int nb_threads = atoi(argv[2]);
// Declare array for prime number search space
int primes[range - 1];
// Declare vars. for time tracking
double start_time, time_taken;
// Initialize vars. for output generation
int rank = 1;
int prev_prime = 2;
char out_filename[100] = "";
// Generate primes
// Initialize start time var.
start_time = omp_get_wtime();
#pragma omp parallel num_threads(nb_threads)
{
// Step 1: Generate all nbs. in search space (as specified by range).
// primes[i-2] holds the candidate value i (i = 2..range).
#pragma omp for
for (int i = 2; i <= range; i++)
primes[i - 2] = i;
// Step 2: Remove composite nbs.
// For each surviving j, zero every later multiple of j; entries already
// zeroed are harmlessly re-zeroed since 0 % j == 0.
// NOTE(review): the read of primes[j-2] races with other threads zeroing
// it; the final result is still correct (a composite j's multiples are
// also multiples of its prime factors) but the race is formally UB --
// consider restructuring if strict conformance matters.
#pragma omp for
for (int j = 2; j <= ((range + 1) / 2); j++)
if (primes[j - 2] != 0)
for (int k = (j - 1); k < (range - 1); k++)
if ((primes[k] % j) == 0)
primes[k] = 0;
}
// Compute and print time taken for prime number generation
time_taken = omp_get_wtime() - start_time;
printf("Time taken for the main part: %f\n", time_taken);
// Generate output file
// Create output file name
sprintf(out_filename, "%d.txt", range);
// Create FILE object
FILE * out_file = fopen(out_filename, "w");
if (!out_file) {
printf("Cannot create output file %s.\n", out_filename);
exit(1);
}
// Write output to FILE object
// Each line: rank, prime, gap to the previous prime.
for (int i = 0; i < (range - 1); i++) {
if (primes[i] != 0) {
fprintf(out_file, "%d, %d, %d\n", rank++, primes[i], (primes[i] - prev_prime));
prev_prime = primes[i];
}
}
// Close FILE object
fclose(out_file);
} |
deprecate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE PPPP RRRR EEEEE CCCC AAA TTTTT EEEEE %
% D D E P P R R E C A A T E %
% D D EEE PPPPP RRRR EEE C AAAAA T EEE %
% D D E P R R E C A A T E %
% DDDD EEEEE P R R EEEEE CCCC A A T EEEEE %
% %
% %
% MagickWand Deprecated Methods %
% %
% Software Design %
% Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "wand/studio.h"
#include "wand/MagickWand.h"
#include "wand/magick-wand-private.h"
#include "wand/wand.h"
#include "magick/monitor-private.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#define PixelViewId "PixelView"
/*
Typedef declarations.
*/
/*
  Deprecated pixel view: a rectangular region of an image exposed as rows of
  pixel wands, with one wand row per worker thread.
*/
struct _PixelView
{
  /* Unique wand id (see AcquireWandId/RelinquishWandId). */
size_t
id;
  /* Display name, e.g. "PixelView-<id>". */
char
name[MaxTextExtent];
  /* Exception sink for cache-view operations. */
ExceptionInfo
*exception;
  /* Owning magick wand. */
MagickWand
*wand;
  /* Cache view over the wand's image pixels. */
CacheView
*view;
  /* Geometry of the viewed region. */
RectangleInfo
region;
  /* Rows in pixel_wands: one array of wands per thread. */
size_t
number_threads;
PixelWand
***pixel_wands;
MagickBooleanType
debug;
  /* Integrity marker; must equal WandSignature while the view is live. */
size_t
signature;
};
#if !defined(MAGICKCORE_EXCLUDE_DEPRECATED)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w A l l o c a t e W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAllocateWand() allocates an initial drawing wand which is an opaque
% handle required by the remaining drawing methods.
%
% The format of the DrawAllocateWand method is:
%
% DrawingWand DrawAllocateWand(const DrawInfo *draw_info,Image *image)
%
% A description of each parameter follows:
%
% o draw_info: Initial drawing defaults. Set to NULL to use defaults.
%
% o image: the image to draw on.
%
*/
WandExport DrawingWand *DrawAllocateWand(const DrawInfo *draw_info,Image *image)
{
  DrawingWand
    *drawing_wand;

  /* Deprecated thin alias for AcquireDrawingWand(). */
  drawing_wand=AcquireDrawingWand(draw_info,image);
  return(drawing_wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k A v e r a g e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickAverageImages() average a set of images.
%
% The format of the MagickAverageImages method is:
%
% MagickWand *MagickAverageImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
/* Build a new MagickWand that shares the source wand's settings but takes
 * ownership of `images` (the caller must not free them afterwards). The
 * initialization order matters: memory is zeroed before any field is set,
 * and the signature is stamped last, once the wand is fully formed. */
static MagickWand *CloneMagickWandFromImages(const MagickWand *wand,
Image *images)
{
MagickWand
*clone_wand;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
clone_wand=(MagickWand *) AcquireMagickMemory(sizeof(*clone_wand));
if (clone_wand == (MagickWand *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
images->filename);
(void) ResetMagickMemory(clone_wand,0,sizeof(*clone_wand));
clone_wand->id=AcquireWandId();
(void) FormatLocaleString(clone_wand->name,MaxTextExtent,"%s-%.20g",
MagickWandId,(double) clone_wand->id);
clone_wand->exception=AcquireExceptionInfo();
InheritException(clone_wand->exception,wand->exception);
clone_wand->image_info=CloneImageInfo(wand->image_info);
clone_wand->quantize_info=CloneQuantizeInfo(wand->quantize_info);
/* Takes ownership of the caller-supplied image list. */
clone_wand->images=images;
clone_wand->debug=IsEventLogging();
if (clone_wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_wand->name);
clone_wand->signature=WandSignature;
return(clone_wand);
}
WandExport MagickWand *MagickAverageImages(MagickWand *wand)
{
  Image
    *mean_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* Nothing to average without an image list. */
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  mean_image=EvaluateImages(wand->images,MeanEvaluateOperator,wand->exception);
  if (mean_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,mean_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelView() makes a copy of the specified pixel view.
%
% The format of the ClonePixelView method is:
%
% PixelView *ClonePixelView(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
/* ClonePixelView() makes a deep copy of the specified pixel view, including
 * the per-thread rows of pixel wands. The caller owns the returned view and
 * must release it with DestroyPixelView(). */
WandExport PixelView *ClonePixelView(const PixelView *pixel_view)
{
  PixelView
    *clone_view;

  register ssize_t
    i;

  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  if (pixel_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name);
  clone_view=(PixelView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) clone_view->id);
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,pixel_view->exception);
  clone_view->view=CloneCacheView(pixel_view->view);
  clone_view->region=pixel_view->region;
  clone_view->number_threads=pixel_view->number_threads;
  /*
    Bug fix: pixel_wands was zeroed by ResetMagickMemory() and never
    allocated, so the clone loop below wrote through a NULL pointer.
    Allocate the per-thread wand table before populating it.
  */
  clone_view->pixel_wands=(PixelWand ***) AcquireQuantumMemory(
    pixel_view->number_threads,sizeof(*clone_view->pixel_wands));
  if (clone_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  for (i=0; i < (ssize_t) pixel_view->number_threads; i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      pixel_view->pixel_wands[i],pixel_view->region.width);
  clone_view->debug=pixel_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  clone_view->signature=WandSignature;
  return(clone_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelView() deallocates memory associated with a pixel view.
%
% The format of the DestroyPixelView method is:
%
% PixelView *DestroyPixelView(PixelView *pixel_view,
% const size_t number_wands,const size_t number_threads)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
% o number_wand: the number of pixel wands.
%
% o number_threads: number of threads.
%
*/
/* Release every per-thread row of pixel wands, then the table itself;
 * always returns NULL (the relinquished pointer). */
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
  const size_t number_wands,const size_t number_threads)
{
  register ssize_t
    i;

  assert(pixel_wands != (PixelWand ***) NULL);
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    if (pixel_wands[i] == (PixelWand **) NULL)
      continue;
    pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands);
  }
  return((PixelWand ***) RelinquishMagickMemory(pixel_wands));
}
WandExport PixelView *DestroyPixelView(PixelView *pixel_view)
{
  /*
    Release everything owned by the view, in dependency order: per-thread
    pixel wands first, then the cache view and exception, finally the view
    struct itself.  The wand member is not destroyed here (borrowed
    reference).  Always returns NULL.
  */
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  /* region.width doubles as the number of wands per thread row. */
  pixel_view->pixel_wands=DestroyPixelsThreadSet(pixel_view->pixel_wands,
    pixel_view->region.width,pixel_view->number_threads);
  pixel_view->view=DestroyCacheView(pixel_view->view);
  pixel_view->exception=DestroyExceptionInfo(pixel_view->exception);
  /* Invalidate the signature to catch use-after-destroy in the asserts. */
  pixel_view->signature=(~WandSignature);
  RelinquishWandId(pixel_view->id);
  pixel_view=(PixelView *) RelinquishMagickMemory(pixel_view);
  return(pixel_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D u p l e x T r a n s f e r P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DuplexTransferPixelViewIterator() iterates over three pixel views in
% parallel and calls your transfer method for each scanline of the view. The
% source and duplex pixel region is not confined to the image canvas-- that is
% you can include negative offsets or widths or heights that exceed the image
% dimension. However, the destination pixel view is confined to the image
% canvas-- that is no negative offsets or widths or heights that exceed the
% image dimension are permitted.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the DuplexTransferPixelViewIterator method is:
%
% MagickBooleanType DuplexTransferPixelViewIterator(PixelView *source,
% PixelView *duplex,PixelView *destination,
% DuplexTransferPixelViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o duplex: the duplex pixel view.
%
% o destination: the destination pixel view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType DuplexTransferPixelViewIterator(
  PixelView *source,PixelView *duplex,PixelView *destination,
  DuplexTransferPixelViewMethod transfer,void *context)
{
#define DuplexTransferPixelViewTag  "PixelView/DuplexTransfer"

  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *duplex_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Iterate over the source, duplex, and destination views in parallel,
    loading each scanline into per-thread pixel wands, invoking the
    user-supplied transfer callback, and syncing changed destination pixels
    back to the image.
  */
  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (transfer == (DuplexTransferPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  duplex_image=duplex->wand->images;
  destination_image=destination->wand->images;
  /* The destination is written to, so it must be promoted to DirectClass. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  /*
    NOTE(review): the loop runs y in [region.y, region.height); when
    region.y != 0 this visits fewer than region.height rows -- confirm this
    bound is intended (pattern is shared with the other view iterators).
  */
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *magick_restrict duplex_indexes,
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict duplex_pixels,
      *magick_restrict pixels;

    register IndexPacket
      *magick_restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict destination_pixels;

    if (status == MagickFalse)
      continue;  /* another thread failed; skip remaining rows */
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    /* CMYK images carry the black channel in the index queue. */
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->region.x,y,
      duplex->region.width,1,duplex->exception);
    if (duplex_pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view);
    for (x=0; x < (ssize_t) duplex->region.width; x++)
      PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x);
    if (duplex_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) duplex->region.width; x++)
        PixelSetBlackQuantum(duplex->pixel_wands[id][x],
          GetPixelIndex(duplex_indexes+x));
    if (duplex_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) duplex->region.width; x++)
        PixelSetIndex(duplex->pixel_wands[id][x],
          GetPixelIndex(duplex_indexes+x));
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->region.x,y,destination->region.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    if (transfer(source,duplex,destination,context) == MagickFalse)
      status=MagickFalse;
    /* Copy the (possibly modified) wand values back to the raw pixels. */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /*
          The sync failed on the destination view, so inherit the exception
          from that view (the original code queried source->view here, a
          copy-paste defect that reported the wrong cache view's error).
        */
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_DuplexTransferPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,DuplexTransferPixelViewTag,
          progress++,source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewException() returns the severity, reason, and description of any
% error that occurs when utilizing a pixel view.
%
% The format of the GetPixelViewException method is:
%
%      char *GetPixelViewException(const PixelView *pixel_view,
%        ExceptionType *severity)
%
% A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
% o severity: the severity of the error is returned here.
%
*/
WandExport char *GetPixelViewException(const PixelView *pixel_view,
  ExceptionType *severity)
{
  char
    *description;

  const ExceptionInfo
    *view_exception;

  /*
    Report the severity and a formatted reason/description string for any
    error recorded on the view.  The returned buffer is owned by the caller.
  */
  assert(pixel_view != (const PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  if (pixel_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name);
  assert(severity != (ExceptionType *) NULL);
  view_exception=pixel_view->exception;
  *severity=view_exception->severity;
  description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,
    sizeof(*description));
  if (description == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  *description='\0';
  if (view_exception->reason != (char *) NULL)
    (void) CopyMagickString(description,GetLocaleExceptionMessage(
      view_exception->severity,view_exception->reason),MaxTextExtent);
  if (view_exception->description != (char *) NULL)
    {
      /* Append the localized description, parenthesized, after the reason. */
      (void) ConcatenateMagickString(description," (",MaxTextExtent);
      (void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
        view_exception->severity,view_exception->description),MaxTextExtent);
      (void) ConcatenateMagickString(description,")",MaxTextExtent);
    }
  return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w H e i g h t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewHeight() returns the pixel view height.
%
% The format of the GetPixelViewHeight method is:
%
% size_t GetPixelViewHeight(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport size_t GetPixelViewHeight(const PixelView *pixel_view)
{
  /* Accessor: height (rows) of the view's region. */
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.height);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewIterator() iterates over the pixel view in parallel and calls
% your get method for each scanline of the view. The pixel region is
% not confined to the image canvas-- that is you can include negative offsets
% or widths or heights that exceed the image dimension. Any updates to
% the pixels in your callback are ignored.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback get method that must be
% executed by a single thread at a time.
%
% The format of the GetPixelViewIterator method is:
%
% MagickBooleanType GetPixelViewIterator(PixelView *source,
% GetPixelViewMethod get,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o get: the get callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType GetPixelViewIterator(PixelView *source,
  GetPixelViewMethod get,void *context)
{
#define GetPixelViewTag  "PixelView/Get"

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Iterate over each scanline of the view in parallel, loading the pixels
    into per-thread pixel wands and invoking the user get callback.  This is
    a read-only traversal: no pixels are synced back, so callback updates
    are discarded.  Returns MagickFalse if any row fails or the callback or
    progress monitor requests termination.
  */
  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (get == (GetPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  /*
    NOTE(review): loop runs y in [region.y, region.height); for region.y != 0
    this covers fewer than region.height rows -- confirm intended (pattern is
    shared with the other view iterators in this file).
  */
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *pixels;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another thread failed; skip remaining rows */
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    /* CMYK images carry the black channel in the index queue. */
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (get(source,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_GetPixelViewIterator)
#endif
        /* Serialized: progress counter is shared across threads. */
        proceed=SetImageProgress(source_image,GetPixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewPixels() returns the pixel view pixel_wands.
%
% The format of the GetPixelViewPixels method is:
%
% PixelWand *GetPixelViewPixels(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport PixelWand **GetPixelViewPixels(const PixelView *pixel_view)
{
  const int
    id = GetOpenMPThreadId();

  /* Return the calling thread's row of pixel wands for this view. */
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->pixel_wands[id]);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewWand() returns the magick wand associated with the pixel view.
%
% The format of the GetPixelViewWand method is:
%
% MagickWand *GetPixelViewWand(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport MagickWand *GetPixelViewWand(const PixelView *pixel_view)
{
  /* Accessor: the magick wand this view was created from (not a copy). */
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w W i d t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewWidth() returns the pixel view width.
%
% The format of the GetPixelViewWidth method is:
%
% size_t GetPixelViewWidth(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport size_t GetPixelViewWidth(const PixelView *pixel_view)
{
  /* Accessor: width (columns) of the view's region. */
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w X %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewX() returns the pixel view x offset.
%
% The format of the GetPixelViewX method is:
%
% ssize_t GetPixelViewX(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport ssize_t GetPixelViewX(const PixelView *pixel_view)
{
  /* Accessor: x offset of the view's region. */
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.x);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w Y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewY() returns the pixel view y offset.
%
% The format of the GetPixelViewY method is:
%
% ssize_t GetPixelViewY(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport ssize_t GetPixelViewY(const PixelView *pixel_view)
{
  /* Accessor: y offset of the view's region. */
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.y);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsPixelView() returns MagickTrue if the parameter is verified as a pixel
%  view container.
%
% The format of the IsPixelView method is:
%
% MagickBooleanType IsPixelView(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport MagickBooleanType IsPixelView(const PixelView *pixel_view)
{
  /*
    A valid pixel view is non-NULL, carries the wand signature, and has a
    name that begins with the PixelViewId prefix.
  */
  if (pixel_view == (const PixelView *) NULL)
    return(MagickFalse);
  if (pixel_view->signature != WandSignature)
    return(MagickFalse);
  if (LocaleNCompare(pixel_view->name,PixelViewId,strlen(PixelViewId)) != 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k C l i p P a t h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickClipPathImage() clips along the named paths from the 8BIM profile, if
% present. Later operations take effect inside the path. Id may be a number
% if preceded with #, to work on a numbered path, e.g., "#1" to use the first
% path.
%
% The format of the MagickClipPathImage method is:
%
% MagickBooleanType MagickClipPathImage(MagickWand *wand,
% const char *pathname,const MagickBooleanType inside)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
*/
WandExport MagickBooleanType MagickClipPathImage(MagickWand *wand,
  const char *pathname,const MagickBooleanType inside)
{
  /* Deprecated: thin wrapper that forwards to MagickClipImagePath(). */
  return(MagickClipImagePath(wand,pathname,inside));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G e t F i l l A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGetFillAlpha() returns the alpha used when drawing using the fill
% color or fill texture. Fully opaque is 1.0.
%
% The format of the DrawGetFillAlpha method is:
%
% double DrawGetFillAlpha(const DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport double DrawGetFillAlpha(const DrawingWand *wand)
{
  /* Deprecated: thin wrapper that forwards to DrawGetFillOpacity(). */
  return(DrawGetFillOpacity(wand));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G e t S t r o k e A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGetStrokeAlpha() returns the alpha of stroked object outlines.
%
% The format of the DrawGetStrokeAlpha method is:
%
% double DrawGetStrokeAlpha(const DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
*/
WandExport double DrawGetStrokeAlpha(const DrawingWand *wand)
{
  /* Deprecated: thin wrapper that forwards to DrawGetStrokeOpacity(). */
  return(DrawGetStrokeOpacity(wand));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P e e k G r a p h i c W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPeekGraphicWand() returns the current drawing wand.
%
% The format of the PeekDrawingWand method is:
%
% DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
{
  /* Deprecated: thin wrapper that forwards to PeekDrawingWand(). */
  return(PeekDrawingWand(wand));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P o p G r a p h i c C o n t e x t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPopGraphicContext() destroys the current drawing wand and returns to the
% previously pushed drawing wand. Multiple drawing wands may exist. It is an
% error to attempt to pop more drawing wands than have been pushed, and it is
% proper form to pop all drawing wands which have been pushed.
%
% The format of the DrawPopGraphicContext method is:
%
% MagickBooleanType DrawPopGraphicContext(DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport void DrawPopGraphicContext(DrawingWand *wand)
{
  /* Deprecated: forwards to PopDrawingWand(); its status is discarded. */
  (void) PopDrawingWand(wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P u s h G r a p h i c C o n t e x t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPushGraphicContext() clones the current drawing wand to create a new
% drawing wand. The original drawing wand(s) may be returned to by
% invoking PopDrawingWand(). The drawing wands are stored on a drawing wand
% stack. For every Pop there must have already been an equivalent Push.
%
% The format of the DrawPushGraphicContext method is:
%
% MagickBooleanType DrawPushGraphicContext(DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport void DrawPushGraphicContext(DrawingWand *wand)
{
  /* Deprecated: forwards to PushDrawingWand(); its status is discarded. */
  (void) PushDrawingWand(wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w S e t F i l l A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawSetFillAlpha() sets the alpha to use when drawing using the fill
% color or fill texture. Fully opaque is 1.0.
%
% The format of the DrawSetFillAlpha method is:
%
% void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
% o fill_alpha: fill alpha
%
*/
WandExport void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
{
  /* Deprecated: thin wrapper that forwards to DrawSetFillOpacity(). */
  DrawSetFillOpacity(wand,fill_alpha);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w S e t S t r o k e A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawSetStrokeAlpha() specifies the alpha of stroked object outlines.
%
% The format of the DrawSetStrokeAlpha method is:
%
% void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
% o stroke_alpha: stroke alpha. The value 1.0 is opaque.
%
*/
WandExport void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha)
{
  /* Deprecated: thin wrapper that forwards to DrawSetStrokeOpacity(). */
  DrawSetStrokeOpacity(wand,stroke_alpha);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k C o l o r F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickColorFloodfillImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% The format of the MagickColorFloodfillImage method is:
%
% MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
% const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
% const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o fill: the floodfill color pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
% o bordercolor: the border color pixel wand.
%
% o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
  const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  DrawInfo
    *draw_info;

  MagickBooleanType
    status;

  PixelPacket
    target;

  /*
    Deprecated floodfill: seed at (x,y) with the fill color; when a border
    color is supplied, fill up to (but not across) that border instead of
    matching the seed color.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  draw_info=CloneDrawInfo(wand->image_info,(DrawInfo *) NULL);
  PixelGetQuantumColor(fill,&draw_info->fill);
  /*
    Cast the image dimensions to ssize_t before the modulo: x and y are
    signed, and the original unsigned size_t operands promoted a negative
    offset to a huge unsigned value instead of wrapping as intended.
  */
  (void) GetOneVirtualPixel(wand->images,x % (ssize_t) wand->images->columns,
    y % (ssize_t) wand->images->rows,&target,wand->exception);
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  wand->images->fuzz=fuzz;
  status=ColorFloodfillImage(wand->images,draw_info,target,x,y,
    bordercolor != (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k D e s c r i b e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickDescribeImage() identifies an image by printing its attributes to the
% file. Attributes include the image width, height, size, and others.
%
% The format of the MagickDescribeImage method is:
%
% const char *MagickDescribeImage(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport char *MagickDescribeImage(MagickWand *wand)
{
  /* Deprecated: thin wrapper that forwards to MagickIdentifyImage(). */
  return(MagickIdentifyImage(wand));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k F l a t t e n I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickFlattenImages() merges a sequence of images. This useful for
% combining Photoshop layers into a single image.
%
% The format of the MagickFlattenImages method is:
%
% MagickWand *MagickFlattenImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickFlattenImages(MagickWand *wand)
{
  Image
    *merged;

  /*
    Deprecated: merge the wand's image sequence into a single image and
    return it wrapped in a new wand cloned from this one; NULL if the wand
    is empty or the merge fails.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  merged=FlattenImages(wand->images,wand->exception);
  if (merged == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,merged));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageAttribute() returns a value associated with the specified
% property. Use MagickRelinquishMemory() to free the value when you are
% finished with it.
%
% The format of the MagickGetImageAttribute method is:
%
% char *MagickGetImageAttribute(MagickWand *wand,const char *property)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o property: the property.
%
*/
WandExport char *MagickGetImageAttribute(MagickWand *wand,const char *property)
{
  /* Deprecated: thin wrapper that forwards to MagickGetImageProperty(). */
  return(MagickGetImageProperty(wand,property));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a g i c k G e t I m a g e I n d e x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageIndex() returns the index of the current image.
%
% The format of the MagickGetImageIndex method is:
%
% ssize_t MagickGetImageIndex(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport ssize_t MagickGetImageIndex(MagickWand *wand)
{
  /* Deprecated: thin wrapper that forwards to MagickGetIteratorIndex(). */
  return(MagickGetIteratorIndex(wand));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a g i c k G e t I m a g e C h a n n e l E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageChannelExtrema() gets the extrema for one or more image
% channels.
%
% The format of the MagickGetImageChannelExtrema method is:
%
% MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
% const ChannelType channel,size_t *minima,size_t *maxima)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the image channel(s).
%
% o minima: The minimum pixel value for the specified channel(s).
%
% o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
  const ChannelType channel,size_t *minima,size_t *maxima)
{
  /*
    Deprecated: forward to the core GetImageChannelExtrema() for the current
    image; the wand must contain at least one image.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  return(GetImageChannelExtrema(wand->images,channel,minima,maxima,
    wand->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a g i c k G e t I m a g e E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageExtrema() gets the extrema for the image.
%
% The format of the MagickGetImageExtrema method is:
%
% MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
% size_t *minima,size_t *maxima)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o minima: The minimum pixel value for the specified channel(s).
%
% o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
  size_t *minima,size_t *maxima)
{
  /*
    Deprecated: forward to the core GetImageExtrema() for the current image;
    the wand must contain at least one image.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  return(GetImageExtrema(wand->images,minima,maxima,wand->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e M a t t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageMatte() returns MagickTrue if the image has a matte channel
% otherwise MagickFalse.
%
% The format of the MagickGetImageMatte method is:
%
% size_t MagickGetImageMatte(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickBooleanType MagickGetImageMatte(MagickWand *wand)
{
  /*
    Return the current image's matte flag (MagickTrue if the image has an
    alpha/matte channel).  Requires at least one image in the wand.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  return(wand->images->matte);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImagePixels() extracts pixel data from an image and returns it to
% you. The method returns MagickTrue on success otherwise MagickFalse if an
% error is encountered. The data is returned as char, short int, int, ssize_t,
% float, or double in the order specified by map.
%
% Suppose you want to extract the first scanline of a 640x480 image as
% character data in red-green-blue order:
%
% MagickGetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
% The format of the MagickGetImagePixels method is:
%
% MagickBooleanType MagickGetImagePixels(MagickWand *wand,
% const ssize_t x,const ssize_t y,const size_t columns,
% const size_t rows,const char *map,const StorageType storage,
% void *pixels)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x, y, columns, rows: These values define the perimeter
% of a region of pixels you want to extract.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o storage: Define the data type of the pixels. Float and double types are
% expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose from
% these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel,
% LongPixel, QuantumPixel, or ShortPixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
*/
WandExport MagickBooleanType MagickGetImagePixels(MagickWand *wand,
  const ssize_t x,const ssize_t y,const size_t columns,
  const size_t rows,const char *map,const StorageType storage,
  void *pixels)
{
  /* Deprecated: thin wrapper that forwards to MagickExportImagePixels(). */
  return(MagickExportImagePixels(wand,x,y,columns,rows,map,storage,pixels));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageSize() returns the image length in bytes.
%
% The format of the MagickGetImageSize method is:
%
%      MagickSizeType MagickGetImageSize(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickSizeType MagickGetImageSize(MagickWand *wand)
{
  /*
    Deprecated: report the blob length, in bytes, of the current image.
  */
  MagickSizeType
    length;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  length=GetBlobSize(wand->images);
  return(length);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMapImage() replaces the colors of an image with the closest color
% from a reference image.
%
% The format of the MagickMapImage method is:
%
% MagickBooleanType MagickMapImage(MagickWand *wand,
% const MagickWand *map_wand,const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o map: the map wand.
%
% o dither: Set this integer value to something other than zero to dither
% the mapped image.
%
*/
WandExport MagickBooleanType MagickMapImage(MagickWand *wand,
  const MagickWand *map_wand,const MagickBooleanType dither)
{
  /*
    Deprecated: replace the image colors with the closest colors found in
    the reference (map) wand.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if ((wand->images == (Image *) NULL) || (map_wand->images == (Image *) NULL))
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  if (MapImage(wand->images,map_wand->images,dither) != MagickFalse)
    return(MagickTrue);
  /* propagate the core exception into the wand on failure */
  InheritException(wand->exception,&wand->images->exception);
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M a t t e F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMatteFloodfillImage() changes the transparency value of any pixel that
% matches target and is an immediate neighbor. If the method
% FillToBorderMethod is specified, the transparency value is changed for any
% neighbor pixel that does not match the bordercolor member of image.
%
% The format of the MagickMatteFloodfillImage method is:
%
% MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
% const double alpha,const double fuzz,const PixelWand *bordercolor,
% const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
% transparent.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
% o bordercolor: the border color pixel wand.
%
% o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
  const double alpha,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  /*
    Deprecated: floodfill the opacity channel starting at (x,y).  When a
    border color is given, fill until that color is met (FillToBorderMethod);
    otherwise fill matching neighbors (FloodfillMethod).

    Note: a DrawInfo was formerly cloned and destroyed here without ever
    being used; that dead allocation has been removed.
  */
  MagickBooleanType
    status;

  PixelPacket
    target;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* probe the target color; modulo keeps the probe inside the image */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  wand->images->fuzz=fuzz;
  status=MatteFloodfillImage(wand->images,target,ClampToQuantum(
    (MagickRealType) QuantumRange-QuantumRange*alpha),x,y,bordercolor !=
    (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M e d i a n F i l t e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMedianFilterImage() applies a digital filter that improves the quality
% of a noisy image. Each pixel is replaced by the median in a set of
% neighboring pixels as defined by radius.
%
% The format of the MagickMedianFilterImage method is:
%
% MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
% const double radius)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
  const double radius)
{
  /*
    Deprecated: reduce noise by replacing each pixel with the median of its
    neighborhood of the given radius.
  */
  Image
    *filtered_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  filtered_image=MedianFilterImage(wand->images,radius,wand->exception);
  if (filtered_image == (Image *) NULL)
    return(MagickFalse);
  /* swap the filtered result into the wand's image list */
  ReplaceImageInList(&wand->images,filtered_image);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M i n i m u m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMinimumImages() returns the minimum intensity of an image sequence.
%
% The format of the MagickMinimumImages method is:
%
% MagickWand *MagickMinimumImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMinimumImages(MagickWand *wand)
{
  /*
    Deprecated: fold the image sequence into a single image holding the
    per-pixel minimum intensity.
  */
  Image
    *min_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  min_image=EvaluateImages(wand->images,MinEvaluateOperator,wand->exception);
  if (min_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,min_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickModeImage() makes each pixel the 'predominant color' of the
% neighborhood of the specified radius.
%
% The format of the MagickModeImage method is:
%
% MagickBooleanType MagickModeImage(MagickWand *wand,
% const double radius)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickModeImage(MagickWand *wand,
  const double radius)
{
  /*
    Deprecated: set each pixel to the predominant color of its neighborhood
    of the given radius.
  */
  Image
    *predominant_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  predominant_image=ModeImage(wand->images,radius,wand->exception);
  if (predominant_image == (Image *) NULL)
    return(MagickFalse);
  /* swap the result into the wand's image list */
  ReplaceImageInList(&wand->images,predominant_image);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M o s a i c I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMosaicImages() inlays an image sequence to form a single coherent
% picture. It returns a wand with each image in the sequence composited at
% the location defined by the page offset of the image.
%
% The format of the MagickMosaicImages method is:
%
% MagickWand *MagickMosaicImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMosaicImages(MagickWand *wand)
{
  /*
    Deprecated: composite the image sequence into one coherent picture,
    each frame placed at its page offset.
  */
  Image
    *composite;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  composite=MosaicImages(wand->images,wand->exception);
  if (composite == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,composite));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickOpaqueImage() changes any pixel that matches color with the color
% defined by fill.
%
% The format of the MagickOpaqueImage method is:
%
% MagickBooleanType MagickOpaqueImage(MagickWand *wand,
% const PixelWand *target,const PixelWand *fill,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o target: Change this target color to the fill color within the image.
%
% o fill: the fill pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  /*
    Deprecated: forward to MagickPaintOpaqueImage().
  */
  MagickBooleanType
    status;

  status=MagickPaintOpaqueImage(wand,target,fill,fuzz);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k P a i n t F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickPaintFloodfillImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% The format of the MagickPaintFloodfillImage method is:
%
% MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
% const ChannelType channel,const PixelWand *fill,const double fuzz,
% const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the channel(s).
%
% o fill: the floodfill color pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
% o bordercolor: the border color pixel wand.
%
% o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
  const ChannelType channel,const PixelWand *fill,const double fuzz,
  const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
{
  /*
    Deprecated: forward to MagickFloodfillPaintImage() with the invert
    flag disabled.
  */
  return(MagickFloodfillPaintImage(wand,channel,fill,fuzz,bordercolor,x,y,
    MagickFalse));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k P a i n t O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickPaintOpaqueImage() changes any pixel that matches color with the color
% defined by fill.
%
% The format of the MagickPaintOpaqueImage method is:
%
% MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
% const PixelWand *target,const PixelWand *fill,const double fuzz)
% MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
% const ChannelType channel,const PixelWand *target,
% const PixelWand *fill,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the channel(s).
%
% o target: Change this target color to the fill color within the image.
%
% o fill: the fill pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  /*
    Deprecated: apply the channel variant across the default channels.
  */
  MagickBooleanType
    status;

  status=MagickPaintOpaqueImageChannel(wand,DefaultChannels,target,fill,fuzz);
  return(status);
}
WandExport MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
  const ChannelType channel,const PixelWand *target,const PixelWand *fill,
  const double fuzz)
{
  /*
    Deprecated: forward to MagickOpaquePaintImageChannel() with the invert
    flag disabled.
  */
  return(MagickOpaquePaintImageChannel(wand,channel,target,fill,fuzz,
    MagickFalse));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k P a i n t T r a n s p a r e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickPaintTransparentImage() changes any pixel that matches color with the
% color defined by fill.
%
% The format of the MagickPaintTransparentImage method is:
%
% MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
% const PixelWand *target,const double alpha,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o target: Change this target color to specified opacity value within
% the image.
%
% o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
% transparent.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  /*
    Deprecated: forward to MagickTransparentPaintImage() with the invert
    flag disabled.
  */
  MagickBooleanType
    status;

  status=MagickTransparentPaintImage(wand,target,alpha,fuzz,MagickFalse);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R a d i a l B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickRadialBlurImage() radial blurs an image.
%
% The format of the MagickRadialBlurImage method is:
%
% MagickBooleanType MagickRadialBlurImage(MagickWand *wand,
% const double angle)
% MagickBooleanType MagickRadialBlurImageChannel(MagickWand *wand,
% const ChannelType channel,const double angle)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the image channel(s).
%
% o angle: the angle of the blur in degrees.
%
*/
WandExport MagickBooleanType MagickRadialBlurImage(MagickWand *wand,
  const double angle)
{
  /*
    Deprecated: forward to the renamed MagickRotationalBlurImage().
  */
  MagickBooleanType
    status;

  status=MagickRotationalBlurImage(wand,angle);
  return(status);
}
WandExport MagickBooleanType MagickRadialBlurImageChannel(MagickWand *wand,
  const ChannelType channel,const double angle)
{
  /*
    Deprecated: forward to the renamed MagickRotationalBlurImageChannel().
  */
  MagickBooleanType
    status;

  status=MagickRotationalBlurImageChannel(wand,channel,angle);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R e c o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickRecolorImage() apply color transformation to an image. The method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the MagickRecolorImage method is:
%
% MagickBooleanType MagickRecolorImage(MagickWand *wand,
% const size_t order,const double *color_matrix)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o order: the number of columns and rows in the color matrix.
%
% o color_matrix: An array of doubles representing the color matrix.
%
*/
WandExport MagickBooleanType MagickRecolorImage(MagickWand *wand,
  const size_t order,const double *color_matrix)
{
  /*
    Deprecated: apply an order-by-order color transformation matrix to the
    current image (saturation, hue rotation, luminance-to-alpha, etc.).
  */
  Image
    *recolor_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (color_matrix == (const double *) NULL)
    return(MagickFalse);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  recolor_image=RecolorImage(wand->images,order,color_matrix,
    wand->exception);
  if (recolor_image == (Image *) NULL)
    return(MagickFalse);
  /* swap the transformed result into the wand's image list */
  ReplaceImageInList(&wand->images,recolor_image);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R e d u c e N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickReduceNoiseImage() smooths the contours of an image while still
% preserving edge information. The algorithm works by replacing each pixel
% with its neighbor closest in value. A neighbor is defined by radius. Use
% a radius of 0 and ReduceNoise() selects a suitable radius for you.
%
% The format of the MagickReduceNoiseImage method is:
%
% MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
% const double radius)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
  const double radius)
{
  /*
    Deprecated: smooth contours while preserving edges; each pixel becomes
    its closest-valued neighbor within the given radius.
  */
  Image
    *smoothed_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  smoothed_image=ReduceNoiseImage(wand->images,radius,wand->exception);
  if (smoothed_image == (Image *) NULL)
    return(MagickFalse);
  /* swap the smoothed result into the wand's image list */
  ReplaceImageInList(&wand->images,smoothed_image);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M a x i m u m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMaximumImages() returns the maximum intensity of an image sequence.
%
% The format of the MagickMaximumImages method is:
%
% MagickWand *MagickMaximumImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMaximumImages(MagickWand *wand)
{
  /*
    Deprecated: fold the image sequence into a single image holding the
    per-pixel maximum intensity.
  */
  Image
    *max_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  max_image=EvaluateImages(wand->images,MaxEvaluateOperator,wand->exception);
  if (max_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,max_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k S e t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickSetImageAttribute() associates a property with an image.
%
% The format of the MagickSetImageAttribute method is:
%
% MagickBooleanType MagickSetImageAttribute(MagickWand *wand,
% const char *property,const char *value)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o property: the property.
%
% o value: the value.
%
*/
WandExport MagickBooleanType MagickSetImageAttribute(MagickWand *wand,
  const char *property,const char *value)
{
  /*
    Deprecated: associate a property with the current image via
    SetImageProperty().

    Fix: validate the wand and guard against an empty image list before
    dereferencing wand->images, matching every sibling method in this file;
    previously a wand with no images was passed straight through.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  return(SetImageProperty(wand->images,property,value));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k S e t I m a g e I n d e x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickSetImageIndex() set the current image to the position of the list
% specified with the index parameter.
%
% The format of the MagickSetImageIndex method is:
%
% MagickBooleanType MagickSetImageIndex(MagickWand *wand,
% const ssize_t index)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o index: the scene number.
%
*/
WandExport MagickBooleanType MagickSetImageIndex(MagickWand *wand,
  const ssize_t index)
{
  /*
    Deprecated: forward to MagickSetIteratorIndex().
  */
  MagickBooleanType
    status;

  status=MagickSetIteratorIndex(wand,index);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k S e t I m a g e O p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickSetImageOption() associates one or more options with a particular
% image format (e.g. MagickSetImageOption(wand,"jpeg","preserve","yes")).
%
% The format of the MagickSetImageOption method is:
%
% MagickBooleanType MagickSetImageOption(MagickWand *wand,
% const char *format,const char *key,const char *value)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o format: the image format.
%
% o key: The key.
%
% o value: The value.
%
*/
WandExport MagickBooleanType MagickSetImageOption(MagickWand *wand,
  const char *format,const char *key,const char *value)
{
  /*
    Deprecated: register a format-specific option by composing a
    "format:key=value" define and handing it to DefineImageOption().
  */
  char
    option[MaxTextExtent];

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  (void) FormatLocaleString(option,MaxTextExtent,"%s:%s=%s",format,key,value);
  return(DefineImageOption(wand->image_info,option));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k T r a n s p a r e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickTransparentImage() changes any pixel that matches color with the
% color defined by fill.
%
% The format of the MagickTransparentImage method is:
%
% MagickBooleanType MagickTransparentImage(MagickWand *wand,
% const PixelWand *target,const double alpha,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o target: Change this target color to specified opacity value within
% the image.
%
% o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
% transparent.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  /*
    Deprecated: forward to MagickPaintTransparentImage().
  */
  MagickBooleanType
    status;

  status=MagickPaintTransparentImage(wand,target,alpha,fuzz);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R e g i o n O f I n t e r e s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickRegionOfInterestImage() extracts a region of the image and returns it
% as a new wand.
%
% The format of the MagickRegionOfInterestImage method is:
%
% MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
% const size_t width,const size_t height,const ssize_t x,
% const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o width: the region width.
%
% o height: the region height.
%
% o x: the region x offset.
%
% o y: the region y offset.
%
*/
WandExport MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
  const size_t width,const size_t height,const ssize_t x,
  const ssize_t y)
{
  /*
    Deprecated: forward to MagickGetImageRegion().
  */
  MagickWand
    *region_wand;

  region_wand=MagickGetImageRegion(wand,width,height,x,y);
  return(region_wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k S e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickSetImagePixels() accepts pixel data and stores it in the image at the
% location you specify. The method returns MagickTrue on success otherwise
% MagickFalse if an error is encountered. The pixel data can be either char,
% short int, int, ssize_t, float, or double in the order specified by map.
%
% Suppose you want to upload the first scanline of a 640x480 image from
% character data in red-green-blue order:
%
% MagickSetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
% The format of the MagickSetImagePixels method is:
%
% MagickBooleanType MagickSetImagePixels(MagickWand *wand,
% const ssize_t x,const ssize_t y,const size_t columns,
% const size_t rows,const char *map,const StorageType storage,
% const void *pixels)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x, y, columns, rows: These values define the perimeter of a region
% of pixels you want to define.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o storage: Define the data type of the pixels. Float and double types are
% expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose from
% these types: CharPixel, ShortPixel, IntegerPixel, LongPixel, FloatPixel,
% or DoublePixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
*/
WandExport MagickBooleanType MagickSetImagePixels(MagickWand *wand,
  const ssize_t x,const ssize_t y,const size_t columns,
  const size_t rows,const char *map,const StorageType storage,
  const void *pixels)
{
  /*
    Deprecated entry point: forward the import request to the replacement
    API, MagickImportImagePixels().
  */
  MagickBooleanType
    status;

  status=MagickImportImagePixels(wand,x,y,columns,rows,map,storage,pixels);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k W r i t e I m a g e B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickWriteImageBlob() implements direct to memory image formats. It
% returns the image as a blob and its length. Use MagickSetFormat() to
% set the format of the returned blob (GIF, JPEG, PNG, etc.).
%
% Use MagickRelinquishMemory() to free the blob when you are done with it.
%
% The format of the MagickWriteImageBlob method is:
%
% unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o length: the length of the blob.
%
*/
WandExport unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
{
  /*
    Deprecated: forward to MagickGetImageBlob(); the caller frees the blob
    with MagickRelinquishMemory().
  */
  unsigned char
    *blob;

  blob=MagickGetImageBlob(wand,length);
  return(blob);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewPixelView() returns a pixel view required for all other methods in the
% Pixel View API.
%
% The format of the NewPixelView method is:
%
% PixelView *NewPixelView(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the wand.
%
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
  const size_t number_threads)
{
  /*
    Allocate one array of number_wands pixel wands per thread; on partial
    failure the whole set is torn down via DestroyPixelsThreadSet().
  */
  PixelWand
    ***pixel_wands;

  ssize_t
    i;

  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  /* zero first so a partially-filled set can be destroyed safely */
  (void) ResetMagickMemory(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_wands[i]=NewPixelWands(number_wands);
    if (pixel_wands[i] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
  }
  return(pixel_wands);
}
WandExport PixelView *NewPixelView(MagickWand *wand)
{
  PixelView
    *pixel_view;

  /* Allocate and initialize a view spanning the wand's whole image. */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /* NOTE: wand must be assigned before the cache view is acquired from it. */
  pixel_view->wand=wand;
  pixel_view->view=AcquireVirtualCacheView(pixel_view->wand->images,
    pixel_view->exception);
  /* view covers the full image extent */
  pixel_view->region.width=wand->images->columns;
  pixel_view->region.height=wand->images->rows;
  /* one row's worth of pixel wands per available thread */
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w P i x e l V i e w R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewPixelViewRegion() returns a pixel view required for all other methods
% in the Pixel View API.
%
% The format of the NewPixelViewRegion method is:
%
% PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixel_wands view.
%
*/
WandExport PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  /*
    Allocate and initialize a pixel view restricted to the given region.

    Fix: pixel_view->wand was previously dereferenced by
    AcquireVirtualCacheView() while still NULL (the struct had just been
    zeroed and wand was assigned only afterwards).  Assign wand first,
    matching NewPixelView().
  */
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  pixel_view->wand=wand;
  pixel_view->view=AcquireVirtualCacheView(pixel_view->wand->images,
    pixel_view->exception);
  pixel_view->region.width=width;
  pixel_view->region.height=height;
  pixel_view->region.x=x;
  pixel_view->region.y=y;
  /* one row's worth of pixel wands per available thread */
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i x e l G e t N e x t R o w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PixelGetNextRow() returns the next row as an array of pixel wands from the
% pixel iterator.
%
% The format of the PixelGetNextRow method is:
%
% PixelWand **PixelGetNextRow(PixelIterator *iterator,
% size_t *number_wands)
%
% A description of each parameter follows:
%
% o iterator: the pixel iterator.
%
% o number_wands: the number of pixel wands.
%
*/
/*
  Deprecated wrapper: fetch the next row from the iterator, discarding the
  wand count that PixelGetNextIteratorRow() reports.
*/
WandExport PixelWand **PixelGetNextRow(PixelIterator *iterator)
{
size_t
number_wands;
return(PixelGetNextIteratorRow(iterator,&number_wands));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i x e l I t e r a t o r G e t E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PixelIteratorGetException() returns the severity, reason, and description of
% any error that occurs when using other methods in this API.
%
% The format of the PixelIteratorGetException method is:
%
% char *PixelIteratorGetException(const PixelIterator *iterator,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o iterator: the pixel iterator.
%
% o severity: the severity of the error is returned here.
%
*/
/* Deprecated alias: forwards directly to PixelGetIteratorException(). */
WandExport char *PixelIteratorGetException(const PixelIterator *iterator,
ExceptionType *severity)
{
return(PixelGetIteratorException(iterator,severity));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelViewIterator() iterates over the pixel view in parallel and calls
% your set method for each scanline of the view. The pixel region is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension. The pixels are initially
% undefined and any settings you make in the callback method are automagically
% synced back to your image.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback set method that must be
% executed by a single thread at a time.
%
% The format of the SetPixelViewIterator method is:
%
% MagickBooleanType SetPixelViewIterator(PixelView *destination,
% SetPixelViewMethod set,void *context)
%
% A description of each parameter follows:
%
% o destination: the pixel view.
%
% o set: the set callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType SetPixelViewIterator(PixelView *destination,
SetPixelViewMethod set,void *context)
{
#define SetPixelViewTag "PixelView/Set"
ExceptionInfo
*exception;
Image
*destination_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(destination != (PixelView *) NULL);
assert(destination->signature == WandSignature);
if (set == (SetPixelViewMethod) NULL)
return(MagickFalse);
destination_image=destination->wand->images;
/* Pixels will be written directly, so the image must be DirectClass. */
if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
/* NOTE(review): the loop runs to region.height, not region.y+height --
   confirm whether a non-zero region.y is meant to shrink the row span. */
for (y=destination->region.y; y < (ssize_t) destination->region.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict pixels;
/* Another row already failed; skip remaining work but keep iterating. */
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(destination->view,destination->region.x,
y,destination->region.width,1,exception);
if (pixels == (PixelPacket *) NULL)
{
InheritException(destination->exception,GetCacheViewException(
destination->view));
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(destination->view);
/* User callback fills this thread's row of pixel wands. */
if (set(destination,context) == MagickFalse)
status=MagickFalse;
/* Copy the wand values back into the authentic pixel row. */
for (x=0; x < (ssize_t) destination->region.width; x++)
PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
if (destination_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) destination->region.width; x++)
SetPixelIndex(indexes+x,PixelGetBlackQuantum(
destination->pixel_wands[id][x]));
sync=SyncCacheViewAuthenticPixels(destination->view,exception);
if (sync == MagickFalse)
{
InheritException(destination->exception,GetCacheViewException(
destination->view));
status=MagickFalse;
}
if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* Serialize progress updates across the worker threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickWand_SetPixelViewIterator)
#endif
proceed=SetImageProgress(destination_image,SetPixelViewTag,progress++,
destination->region.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f e r P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransferPixelViewIterator() iterates over two pixel views in parallel and
% calls your transfer method for each scanline of the view. The source pixel
% region is not confined to the image canvas-- that is you can include
% negative offsets or widths or heights that exceed the image dimension.
% However, the destination pixel view is confined to the image canvas-- that
% is no negative offsets or widths or heights that exceed the image dimension
% are permitted.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the TransferPixelViewIterator method is:
%
% MagickBooleanType TransferPixelViewIterator(PixelView *source,
% PixelView *destination,TransferPixelViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o destination: the destination pixel view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
/*
  Iterate the source and destination views row by row, priming the per-thread
  pixel wands, invoking the user transfer callback, and syncing the result
  back to the destination image.
*/
WandExport MagickBooleanType TransferPixelViewIterator(PixelView *source,
  PixelView *destination,TransferPixelViewMethod transfer,void *context)
{
#define TransferPixelViewTag "PixelView/Transfer"
  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  /*
    Bug fix: validate the destination view too; it was dereferenced below
    without any check.
  */
  assert(destination != (PixelView *) NULL);
  assert(destination->signature == WandSignature);
  if (transfer == (TransferPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict pixels;

    register IndexPacket
      *magick_restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict destination_pixels;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    /* Prime the source wands from the virtual pixel row. */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->region.x,y,destination->region.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /*
      NOTE(review): the destination wands are primed from the SOURCE row
      (pixels/indexes), matching the historical behavior of this deprecated
      API -- confirm before changing to destination_pixels.
    */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* User callback transforms source wands into destination wands. */
    if (transfer(source,destination,context) == MagickFalse)
      status=MagickFalse;
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /*
          Bug fix: the failed sync is on the destination view, so inherit
          the exception from destination->view (the previous code queried
          source->view, which had no error to report).
        */
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across the worker threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickWand_TransferPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,TransferPixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U p d a t e P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UpdatePixelViewIterator() iterates over the pixel view in parallel and calls
% your update method for each scanline of the view. The pixel region is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension are permitted. Updates to pixels
% in your callback are automagically synced back to the image.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback update method that must be
% executed by a single thread at a time.
%
% The format of the UpdatePixelViewIterator method is:
%
% MagickBooleanType UpdatePixelViewIterator(PixelView *source,
% UpdatePixelViewMethod update,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o update: the update callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType UpdatePixelViewIterator(PixelView *source,
UpdatePixelViewMethod update,void *context)
{
#define UpdatePixelViewTag "PixelView/Update"
ExceptionInfo
*exception;
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(source != (PixelView *) NULL);
assert(source->signature == WandSignature);
if (update == (UpdatePixelViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
/* Pixels will be modified in place, so the image must be DirectClass. */
if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
/* NOTE(review): the loop runs to region.height, not region.y+height --
   confirm whether a non-zero region.y is meant to shrink the row span. */
for (y=source->region.y; y < (ssize_t) source->region.height; y++)
{
const int
id = GetOpenMPThreadId();
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict pixels;
/* Another row already failed; skip remaining work but keep iterating. */
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(source->view,source->region.x,y,
source->region.width,1,exception);
if (pixels == (PixelPacket *) NULL)
{
InheritException(source->exception,GetCacheViewException(
source->view));
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(source->view);
/* Load the row into this thread's pixel wands. */
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetBlackQuantum(source->pixel_wands[id][x],
GetPixelIndex(indexes+x));
/* User callback updates the wands in place. */
if (update(source,context) == MagickFalse)
status=MagickFalse;
/* Write the (possibly modified) wand values back to the row. */
for (x=0; x < (ssize_t) source->region.width; x++)
PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->region.width; x++)
SetPixelIndex(indexes+x,PixelGetBlackQuantum(
source->pixel_wands[id][x]));
if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
{
InheritException(source->exception,GetCacheViewException(source->view));
status=MagickFalse;
}
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* Serialize progress updates across the worker threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickWand_UpdatePixelViewIterator)
#endif
proceed=SetImageProgress(source_image,UpdatePixelViewTag,progress++,
source->region.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
#endif
|
test37.c | int main() {
/* Exercises nested critical/atomic constructs.
   NOTE(review): a, b, c start uninitialized and c is never used --
   presumably irrelevant for what this test checks; confirm. */
int a, b, c;
#pragma omp parallel
{
#pragma omp critical
{
/* Atomic read of the shared counter into b. */
#pragma omp atomic read
b = a;
#pragma omp atomic
a = a + 2;
/* A named critical region may legally nest inside the unnamed one. */
#pragma omp critical (name)
{
b++;
}
}
/* NOTE(review): this unsynchronized increment races with the atomics
   above -- presumably intentional for this test case. */
a++;
}
}
|
openmp_pi.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/*
  Monte-Carlo estimate of pi: throw `samples` darts at the unit square and
  count those landing inside the quarter circle, in parallel over `nthreads`
  OpenMP threads.  Usage: ./a.out <samples> <nthreads>
*/
int main(int argc, char *argv[]) {
  int i;
  int count = 0;
  double x, y;
  int samples, nthreads;
  double pi;
  /* Bug fix: was `void main` (non-standard) and dereferenced argv[1]/argv[2]
     without checking argc. */
  if (argc < 3) {
    fprintf(stderr, "usage: %s <samples> <nthreads>\n", argv[0]);
    return 1;
  }
  samples = atoi(argv[1]);
  nthreads = atoi(argv[2]);
  double start = omp_get_wtime();
  /* count is summed across threads; x, y, i are per-thread scratch. */
  #pragma omp parallel firstprivate(x, y, i) reduction(+:count) num_threads(nthreads)
  {
    #pragma omp for
    for (i = 0; i < samples; i++) {
      /* NOTE(review): random() has a single hidden state shared by all
         threads, so results are neither reproducible nor contention-free;
         consider a per-thread generator such as rand_r/drand48_r. */
      x = (double)random() / RAND_MAX;
      y = (double)random() / RAND_MAX;
      if (x*x + y*y <= 1){
        count++;
      }
    }
  }
  double end = omp_get_wtime();
  printf("elapsed time: %.16g\n", end - start);
  pi = 4.0 * ((double)count/(double)samples);
  printf("Count = %d, Sample = %d, Estimate of pi = %7.5f\n", count, samples, pi);
  return 0;
} |
crossover.h | void inicializaCruzaHijos(int *c1, int *c2, int N)
/* Reset both child chromosomes to -1, the "empty gene" marker used by the
   PMX repair pass. */
{
#pragma omp for
for (int m = 0; m < N; m++)
{
c1[m] = -1;
c2[m] = -1;
}
}
/* Copy the two selected individuals into the parent working buffers. */
void cruzaInicializaPadres(int *p1, int *p2, int *ind1, int *ind2, int N)
{
#pragma omp for
    for (int i = 0; i < N; ++i)
    {
        p1[i] = ind1[i];
        p2[i] = ind2[i];
    }
}
/* PMX middle segment: each child inherits the slice [inicio, fin) from the
   OTHER parent. */
void cruzaCopiaMedio(int *c1, int *c2, int *p1, int *p2, int inicio, int fin)
{
#pragma omp for
    for (int idx = inicio; idx < fin; ++idx)
    {
        c1[idx] = p2[idx];
        c2[idx] = p1[idx];
    }
}
/*
  PMX outer segments: copy a parent's gene into its child only when that
  gene does not already appear in the child's middle slice
  [inimedio, finmedio), keeping the child a duplicate-free permutation.
  N is unused here but kept for interface compatibility with the callers.
*/
void cruzaCopiaExtremo(int *c1, int *c2, int *p1, int *p2, int inicio, int fin, int N, int inimedio, int finmedio)
{
    for (int a = inicio; a < fin; a++)
    {
        int gene1 = p1[a];
        int gene2 = p2[a];
        int seen1 = 0;
        int seen2 = 0;
#pragma omp for
        for (int b = inimedio; b < finmedio; b++)
        {
            if (c1[b] == gene1)
            {
                seen1 = 1;
            }
            if (c2[b] == gene2)
            {
                seen2 = 1;
            }
        }
        if (seen1 == 0)
        {
            c1[a] = gene1;
        }
        if (seen2 == 0)
        {
            c2[a] = gene2;
        }
    }
}
// Partially Mapped Crossover (PMX): for each consecutive pair of parents in
// [inicio, fin), build two children and append them to the population
// starting at index 'fin'.
void Crossover(Chromo *parents, Chromo *population, int N, int inicio,int fin)
{
    // children
    int *c1 = (int *)malloc(sizeof(int) * N);
    int *c2 = (int *)malloc(sizeof(int) * N);
    // parents
    int *p1 = (int *)malloc(sizeof(int) * N);
    int *p2 = (int *)malloc(sizeof(int) * N);
    int flag1;
    int k = N / 3;
    int posnp = fin;
    for (int n = inicio; (n + 1) < fin; n = n + 2)
    {
        flag1 = 0;
        inicializaCruzaHijos(c1, c2, N);
        // initialize the parents
        cruzaInicializaPadres(p1, p2, parents[n].config, parents[n + 1].config, N);
        cruzaCopiaMedio(c1, c2, p1, p2, k, (N - k));
        cruzaCopiaExtremo(c1, c2, p1, p2, 0, k, N, k, (N - k));
        cruzaCopiaExtremo(c1, c2, p1, p2, (N - k), N, N, k, (N - k));
        int count, co;
        /* Repair pass: any gene still missing from a child is placed in its
           first empty (-1) slot so the child stays a valid permutation. */
#pragma omp for
        for (int a = 0; a < N; a++)
        {
            flag1 = 0;
            count = 0;
            co = 0;
            while ((!flag1) && count < N)
            {
                if (a == c1[count])
                {
                    flag1 = 1;
                }
                count++;
            }
            if (!flag1)
            {
                /* Bug fix: test the bound BEFORE reading c1[co]; the old
                   order read one element past the array when it was full. */
                while ((co < N) && (c1[co] != -1))
                {
                    co++;
                }
                if (co < N)
                    c1[co] = a;
            }
            flag1 = 0;
            count = 0;
            co = 0;
            while ((!flag1) && count < N)
            {
                if (a == c2[count])
                {
                    flag1 = 1;
                }
                count++;
            }
            if (!flag1)
            {
                while ((co < N) && (c2[co] != -1))
                {
                    co++;
                }
                if (co < N)
                    c2[co] = a;
            }
        }
#pragma omp for
        for (int i = 0; i < N; i++)
        {
            population[posnp].config[i] = c1[i];
            population[posnp + 1].config[i] = c2[i];
        }
        posnp = posnp + 2;
    }
    /* Bug fix: the four work buffers were leaked on every call. */
    free(c1);
    free(c2);
    free(p1);
    free(p2);
}
|
api.c | // RUN: %libomptarget-compile-run-and-check-generic
// XFAIL: nvptx64-nvidia-cuda
// XFAIL: nvptx64-nvidia-cuda-newRTL
// Fails on amdgpu with error: GPU Memory Error
// XFAIL: amdgcn-amd-amdhsa
// XFAIL: amdgcn-amd-amdhsa-newRTL
#include <stdio.h>
#include <omp.h>
// ---------------------------------------------------------------------------
// Various definitions copied from OpenMP RTL
extern void __tgt_register_requires(int64_t);
// End of definitions copied from OpenMP RTL.
// ---------------------------------------------------------------------------
#pragma omp requires unified_shared_memory
#define N 1024
void init(int A[], int B[], int C[]) {
for (int i = 0; i < N; ++i) {
A[i] = 0;
B[i] = 1;
C[i] = i;
}
}
int main(int argc, char *argv[]) {
const int device = omp_get_default_device();
// Manual registration of requires flags for Clang versions
// that do not support requires.
__tgt_register_requires(8);
// CHECK: Initial device: [[INITIAL_DEVICE:[0-9]+]]
printf("Initial device: %d\n", omp_get_initial_device());
// CHECK: Num devices: [[INITIAL_DEVICE]]
printf("Num devices: %d\n", omp_get_num_devices());
//
// Target alloc & target memcpy
//
int A[N], B[N], C[N];
// Init
init(A, B, C);
int *pA, *pB, *pC;
// map ptrs
pA = &A[0];
pB = &B[0];
pC = &C[0];
// Raw device allocations for the three arrays.
int *d_A = (int *)omp_target_alloc(N * sizeof(int), device);
int *d_B = (int *)omp_target_alloc(N * sizeof(int), device);
int *d_C = (int *)omp_target_alloc(N * sizeof(int), device);
// CHECK: omp_target_alloc succeeded
printf("omp_target_alloc %s\n", d_A && d_B && d_C ? "succeeded" : "failed");
// Copy the host inputs B and C into the device buffers.
omp_target_memcpy(d_B, pB, N * sizeof(int), 0, 0, device,
omp_get_initial_device());
omp_target_memcpy(d_C, pC, N * sizeof(int), 0, 0, device,
omp_get_initial_device());
// Compute A = B + C + 1 on the device using the raw device pointers.
#pragma omp target is_device_ptr(d_A, d_B, d_C) device(device)
{
#pragma omp parallel for schedule(static, 1)
for (int i = 0; i < N; i++) {
d_A[i] = d_B[i] + d_C[i] + 1;
}
}
// Copy the device result back to the host for validation.
omp_target_memcpy(pA, d_A, N * sizeof(int), 0, 0, omp_get_initial_device(),
device);
// CHECK: Test omp_target_memcpy: Succeeded
int fail = 0;
for (int i = 0; i < N; ++i) {
if (A[i] != i + 2)
fail++;
}
if (fail) {
printf("Test omp_target_memcpy: Failed\n");
} else {
printf("Test omp_target_memcpy: Succeeded\n");
}
//
// target_is_present and target_associate/disassociate_ptr
//
init(A, B, C);
// CHECK: B is not present, associating it...
// CHECK: omp_target_associate_ptr B succeeded
if (!omp_target_is_present(B, device)) {
printf("B is not present, associating it...\n");
int rc = omp_target_associate_ptr(B, d_B, N * sizeof(int), 0, device);
printf("omp_target_associate_ptr B %s\n", !rc ? "succeeded" : "failed");
}
// CHECK: C is not present, associating it...
// CHECK: omp_target_associate_ptr C succeeded
if (!omp_target_is_present(C, device)) {
printf("C is not present, associating it...\n");
int rc = omp_target_associate_ptr(C, d_C, N * sizeof(int), 0, device);
printf("omp_target_associate_ptr C %s\n", !rc ? "succeeded" : "failed");
}
// CHECK: Inside target data: A is not present
// CHECK: Inside target data: B is present
// CHECK: Inside target data: C is present
#pragma omp target data map(from : B, C) device(device)
{
printf("Inside target data: A is%s present\n",
omp_target_is_present(A, device) ? "" : " not");
printf("Inside target data: B is%s present\n",
omp_target_is_present(B, device) ? "" : " not");
printf("Inside target data: C is%s present\n",
omp_target_is_present(C, device) ? "" : " not");
#pragma omp target map(from : A) device(device)
{
#pragma omp parallel for schedule(static, 1)
for (int i = 0; i < N; i++)
A[i] = B[i] + C[i] + 1;
}
}
// CHECK: B is present, disassociating it...
// CHECK: omp_target_disassociate_ptr B succeeded
// CHECK: C is present, disassociating it...
// CHECK: omp_target_disassociate_ptr C succeeded
if (omp_target_is_present(B, device)) {
printf("B is present, disassociating it...\n");
int rc = omp_target_disassociate_ptr(B, device);
printf("omp_target_disassociate_ptr B %s\n", !rc ? "succeeded" : "failed");
}
if (omp_target_is_present(C, device)) {
printf("C is present, disassociating it...\n");
int rc = omp_target_disassociate_ptr(C, device);
printf("omp_target_disassociate_ptr C %s\n", !rc ? "succeeded" : "failed");
}
// CHECK: Test omp_target_associate_ptr: Succeeded
fail = 0;
for (int i = 0; i < N; ++i) {
if (A[i] != i + 2)
fail++;
}
if (fail) {
printf("Test omp_target_associate_ptr: Failed\n");
} else {
printf("Test omp_target_associate_ptr: Succeeded\n");
}
// Release the raw device allocations.
omp_target_free(d_A, device);
omp_target_free(d_B, device);
omp_target_free(d_C, device);
printf("Done!\n");
return 0;
}
|
opi.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "omp.h"
/*
  Monte-Carlo estimate of pi using a per-thread drand48_r generator.
  Usage: ./a.out <Nthreads>
*/
int main(int argc, char **argv) {
  // Q2b: get the number of threads to run with from argv and
  // add OpenMP API code to set number of threads here
  /* Bug fix: argv[1] was dereferenced without checking argc. */
  if (argc < 2) {
    fprintf(stderr, "usage: %s <Nthreads>\n", argv[0]);
    return 1;
  }
  int Nthreads = atoi(argv[1]);
  if (Nthreads < 1)
    Nthreads = 1; /* guard against non-numeric or non-positive input */
  omp_set_num_threads(Nthreads);

  /* One independent RNG state per thread. */
  struct drand48_data *drandData;
  drandData = (struct drand48_data*) malloc(Nthreads*sizeof(struct drand48_data));
  if (drandData == NULL) {
    fprintf(stderr, "out of memory\n");
    return 1;
  }

  // Q2c: each thread seeds its own generator from its thread number
  #pragma omp parallel
  {
    int rank = omp_get_thread_num();
    long int seed = rank;
    srand48_r(seed, drandData+rank);
  }

  long long int Ntrials = 10000000;

  // running tallies
  long long int Ntotal=0;
  long long int Ncircle=0;

  double startTime = omp_get_wtime();

  #pragma omp parallel for reduction(+:Ncircle) reduction(+:Ntotal)
  for (long long int n=0; n<Ntrials; n++) {
    int rank = omp_get_thread_num();
    double rand1;
    double rand2;

    // generate two random numbers (use the thread id to offset drandData)
    drand48_r(drandData+rank, &rand1);
    drand48_r(drandData+rank, &rand2);

    double x = -1 + 2*rand1; //shift to [-1,1]
    double y = -1 + 2*rand2;

    // check if it's in the circle
    if (sqrt(x*x+y*y)<=1) Ncircle++;
    Ntotal++;
  }
  double endTime = omp_get_wtime();

  double pi = 4.0*Ncircle/ (double) (Ntotal);

  printf("Our final estimate of pi is %g \n", pi);
  printf("Runtime was %f seconds\n", endTime-startTime);

  free(drandData);
  return 0;
}
|
type3_georel.kernel_runtime.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include "local_header.h"
#include "openmp_pscmc_inc.h"
#include "type3_georel.kernel_inc.h"
/* No per-kernel initialization is needed for the OpenMP backend; always
   reports success. */
int openmp_relng_1st_goto_init (openmp_pscmc_env * pe ,openmp_relng_1st_goto_struct * kerstr ){
return 0 ;}
/* Report the size of the kernel argument struct to the host wrapper. */
void openmp_relng_1st_goto_get_struct_len (size_t * len ){
  *len = sizeof(openmp_relng_1st_goto_struct);
}
/* One compute unit per available OpenMP thread; kerstr is unused here. */
int openmp_relng_1st_goto_get_num_compute_units (openmp_relng_1st_goto_struct * kerstr ){
  return omp_get_max_threads();
}
/* This kernel uses a single compute lane in the x direction. */
int openmp_relng_1st_goto_get_xlen (){
  return 1;
}
/*
  Run the kernel over the (xlen, ylen) index space.  Rows (yid) are
  distributed cyclically over the OpenMP threads; each thread sweeps all
  columns (xid) of its rows.  Returns 0 on success.
*/
int openmp_relng_1st_goto_exec (openmp_relng_1st_goto_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
  {
    int xid;
    int yid;
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    /* Dead code removed: the original also computed a block partition
       (ysingle/ymin/ymax) that was never used by the cyclic loop below. */
    for (yid = tid; yid < scmc_internal_g_ylen; yid = yid + numt)
    {
      for (xid = 0; xid < scmc_internal_g_xlen; xid = xid + 1)
      {
        openmp_relng_1st_goto_scmc_kernel ( kerstr->inoutput , kerstr->xyzw , kerstr->cu_cache , kerstr->cu_xyzw , kerstr->xoffset , kerstr->yoffset , kerstr->zoffset , kerstr->fieldE , kerstr->fieldB , kerstr->fieldB1 , kerstr->FoutJ , kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] , kerstr->ovlp[0] , kerstr->numvec[0] , kerstr->num_ele[0] , kerstr->grid_cache_len[0] , kerstr->cu_cache_length[0] , kerstr->DELTA_X[0] , kerstr->DELTA_Y[0] , kerstr->DELTA_Z[0] , kerstr->Mass0[0] , kerstr->Charge0[0] , kerstr->Deltat[0] , kerstr->Tori_X0[0] , kerstr->Solve_Err[0] , yid , scmc_internal_g_ylen );
      }
    }
  }
  return 0;
}
int openmp_relng_1st_goto_scmc_set_parameter_inoutput (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->inoutput = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_xyzw (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->xyzw = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_cu_cache (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cu_cache = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_cu_xyzw (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cu_xyzw = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_xoffset (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->xoffset = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_yoffset (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->yoffset = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_zoffset (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->zoffset = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_fieldE (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->fieldE = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_fieldB (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->fieldB = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_fieldB1 (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->fieldB1 = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_FoutJ (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->FoutJ = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_XLEN (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->XLEN = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_YLEN (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->YLEN = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_ZLEN (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->ZLEN = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_ovlp (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->ovlp = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_numvec (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->numvec = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_num_ele (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->num_ele = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_grid_cache_len (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->grid_cache_len = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_cu_cache_length (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cu_cache_length = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_DELTA_X (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DELTA_X = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_DELTA_Y (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DELTA_Y = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_DELTA_Z (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DELTA_Z = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_Mass0 (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->Mass0 = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_Charge0 (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->Charge0 = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_Deltat (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->Deltat = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_Tori_X0 (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->Tori_X0 = pm->d_data);
}
int openmp_relng_1st_goto_scmc_set_parameter_Solve_Err (openmp_relng_1st_goto_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->Solve_Err = pm->d_data);
}
|
sgels.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgels.c, normal z -> s, Fri Sep 28 17:38:05 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_gels
*
* Solves overdetermined or underdetermined linear systems
* involving an m-by-n matrix A using a QR or LQ factorization of A. It
* is assumed that A has full rank. The following options are provided:
*
* # trans = PlasmaNoTrans and m >= n: find the least squares solution of an
* overdetermined system, i.e., solve the least squares problem:
* minimize || B - A*X ||.
*
* # trans = PlasmaNoTrans and m < n: find the minimum norm solution of an
* underdetermined system A * X = B.
*
* Several right-hand side vectors B and solution vectors X can be handled in a
* single call; they are stored as the columns of the m-by-nrhs right-hand side
* matrix B and the n-by-nrhs solution matrix X.
*
*******************************************************************************
*
* @param[in] trans
* - PlasmaNoTrans: the linear system involves A
* (the only supported option for now).
*
* @param[in] m
* The number of rows of the matrix A. m >= 0.
*
* @param[in] n
* The number of columns of the matrix A. n >= 0.
*
* @param[in] nrhs
* The number of right hand sides, i.e., the number of columns of the
* matrices B and X. nrhs >= 0.
*
* @param[in,out] pA
* On entry, pointer to the m-by-n matrix A.
* On exit,
* if m >= n, A is overwritten by details of its QR factorization as
* returned by plasma_sgeqrf;
* if m < n, A is overwritten by details of its LQ factorization as
* returned by plasma_sgelqf.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
* @param[out] T
* On exit, auxiliary factorization data.
* Matrix of T is allocated inside this function and needs to be
* destroyed by plasma_desc_destroy.
*
* @param[in,out] pB
* On entry, pointer to the m-by-nrhs matrix B of right-hand side
* vectors, stored columnwise;
* On exit, if return value = 0, B is overwritten by the solution
* vectors, stored columnwise:
* if m >= n, rows 1 to n of B contain the least squares solution
* vectors; the residual sum of squares for the solution in each column
* is given by the sum of squares of the modulus of elements n+1 to m
* in that column;
* if m < n, rows 1 to n of B contain the minimum norm solution
* vectors;
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m,n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
*******************************************************************************
*
* @sa plasma_omp_sgels
* @sa plasma_cgels
* @sa plasma_dgels
* @sa plasma_sgels
* @sa plasma_sgeqrf
* @sa plasma_sgeqrs
*
******************************************************************************/
int plasma_sgels(plasma_enum_t trans,
                 int m, int n, int nrhs,
                 float *pA, int lda,
                 plasma_desc_t *T,
                 float *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }
    // Check input arguments.
    if (trans != PlasmaNoTrans) {
        plasma_error("only PlasmaNoTrans supported");
        return PlasmaErrorNotSupported;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -2;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -3;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -4;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -6;
    }
    if (ldb < imax(1, imax(m, n))) {
        plasma_error("illegal value of ldb");
        return -9;
    }
    // Quick return: with an empty A or no right-hand sides, the solution
    // is simply the zero matrix.
    if (imin(m, imin(n, nrhs)) == 0) {
        for (int i = 0; i < imax(m, n); i++)
            for (int j = 0; j < nrhs; j++)
                pB[j*ldb+i] = 0.0;
        return PlasmaSuccess;
    }
    // Tune parameters (QR path for tall, LQ path for wide matrices).
    if (plasma->tuning) {
        if (m < n)
            plasma_tune_gelqf(plasma, PlasmaRealFloat, m, n);
        else
            plasma_tune_geqrf(plasma, PlasmaRealFloat, m, n);
    }
    // Set tiling parameters.
    int ib = plasma->ib;
    int nb = plasma->nb;
    plasma_enum_t householder_mode = plasma->householder_mode;
    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        imax(m, n), nrhs, 0, 0, imax(m, n),
                                        nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }
    // Prepare descriptor T.
    retval = plasma_descT_create(A, ib, householder_mode, T);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_descT_create() failed");
        // Fix: release the tile matrices allocated above; this path
        // previously leaked both A and B.
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }
    // Allocate workspace.
    plasma_workspace_t work;
    size_t lwork = nb + ib*nb;  // geqrt/gelqt: tau + work
    retval = plasma_workspace_create(&work, lwork, PlasmaRealFloat);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_workspace_create() failed");
        // Fix: release T and the tile matrices; this path previously
        // leaked A, B and T.
        plasma_desc_destroy(T);
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }
    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    // Asynchronous block: all tasks are submitted from the master thread
    // and the implicit barrier at the end of the parallel region acts as
    // the synchronization point.
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_sge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_sge2desc(pB, ldb, B, &sequence, &request);
        // Call the tile async function.
        plasma_omp_sgels(PlasmaNoTrans,
                         A, *T,
                         B, work,
                         &sequence, &request);
        // Translate back to LAPACK layout.
        plasma_omp_sdesc2ge(A, pA, lda, &sequence, &request);
        plasma_omp_sdesc2ge(B, pB, ldb, &sequence, &request);
    }
    // Implicit synchronization has occurred; safe to free resources.
    plasma_workspace_destroy(&work);
    // Free matrices in tile layout (T is returned to the caller).
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);
    // Return status accumulated by the task sequence.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_gels
*
* Solves overdetermined or underdetermined linear
* system of equations using the tile QR or the tile LQ factorization.
* May return before the computation is finished.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] trans
* - PlasmaNoTrans: the linear system involves A
* (the only supported option for now).
*
* @param[in,out] A
* Descriptor of matrix A stored in the tile layout.
* On exit,
* if m >= n, A is overwritten by details of its QR factorization
* as returned by plasma_sgeqrf;
* if m < n, A is overwritten by details of its LQ factorization
* as returned by plasma_sgelqf.
*
* @param[out] T
* Descriptor of matrix T.
* Auxiliary factorization data, computed by
* plasma_sgeqrf or plasma_sgelqf.
*
* @param[in,out] B
* Descriptor of matrix B.
* On entry, right-hand side matrix B in the tile layout.
* On exit, solution matrix X in the tile layout.
*
* @param[in] work
* Workspace for the auxiliary arrays needed by some coreblas kernels.
* For QR/LQ factorizations used in GELS, it contains preallocated
* space for tau and work arrays.
* Allocated by the plasma_workspace_create function.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_sgels
* @sa plasma_omp_cgels
* @sa plasma_omp_dgels
* @sa plasma_omp_sgels
*
******************************************************************************/
void plasma_omp_sgels(plasma_enum_t trans,
                      plasma_desc_t A, plasma_desc_t T,
                      plasma_desc_t B, plasma_workspace_t work,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Fix: validate sequence and request before anything dereferences
    // them.  The original code called plasma_request_fail(sequence, ...)
    // on earlier error paths and only tested the pointers for NULL
    // afterwards, so a NULL sequence or request caused a NULL-pointer
    // dereference instead of a clean diagnostic.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Check input arguments.
    if (trans != PlasmaNoTrans) {
        plasma_error("only PlasmaNoTrans supported");
        plasma_request_fail(sequence, request, PlasmaErrorNotSupported);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid descriptor A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(T) != PlasmaSuccess) {
        plasma_error("invalid descriptor T");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid descriptor B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Quick return: nothing to factor or no right-hand sides.
    if (A.m == 0 || A.n == 0 || B.n == 0) {
        // Zero matrix B.
        plasma_pslaset(PlasmaGeneral, 0.0, 0.0, B, sequence, request);
        return;
    }
    //===============================
    // Solve using QR factorization.
    //===============================
    if (A.m >= A.n) {
        if (plasma->householder_mode == PlasmaTreeHouseholder) {
            plasma_psgeqrf_tree(A, T, work, sequence, request);
        }
        else {
            plasma_psgeqrf(A, T, work, sequence, request);
        }
        // B := Q^T * B.
        if (plasma->householder_mode == PlasmaTreeHouseholder) {
            plasma_psormqr_tree(PlasmaLeft, PlasmaTrans,
                                A, T, B,
                                work, sequence, request);
        }
        else {
            plasma_psormqr(PlasmaLeft, PlasmaTrans,
                           A, T, B,
                           work, sequence, request);
        }
        // Solve R * X = Q^T * B with the upper-triangular n-by-n block.
        plasma_pstrsm(PlasmaLeft, PlasmaUpper,
                      PlasmaNoTrans, PlasmaNonUnit,
                      1.0,
                      plasma_desc_view(A, 0, 0, A.n, A.n),
                      plasma_desc_view(B, 0, 0, A.n, B.n),
                      sequence, request);
    }
    //===============================
    // Solve using LQ factorization.
    //===============================
    else {
        if (plasma->householder_mode == PlasmaTreeHouseholder) {
            plasma_psgelqf_tree(A, T, work, sequence, request);
        }
        else {
            plasma_psgelqf(A, T, work, sequence, request);
        }
        // Zero the trailing block of the right-hand-side matrix.
        // B has fewer rows than X.
        plasma_pslaset(PlasmaGeneral, 0.0, 0.0,
                       plasma_desc_view(B, A.m, 0, A.n-A.m, B.n),
                       sequence, request);
        // Solve L * Y = B.
        plasma_pstrsm(
            PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaNonUnit,
            1.0, plasma_desc_view(A, 0, 0, A.m, A.m),
            plasma_desc_view(B, 0, 0, A.m, B.n),
            sequence, request);
        // Find X = Q^T * Y.
        if (plasma->householder_mode == PlasmaTreeHouseholder) {
            plasma_psormlq_tree(PlasmaLeft, PlasmaTrans,
                                A, T, B,
                                work, sequence, request);
        }
        else {
            plasma_psormlq(PlasmaLeft, PlasmaTrans,
                           A, T, B,
                           work, sequence, request);
        }
    }
}
|
XSHA512_fmt_plug.c | /*
* This file is part of John the Ripper password cracker,
* Copyright (c) 2008,2011 by Solar Designer
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_XSHA512;
#elif FMT_REGISTERS_H
john_register_one(&fmt_XSHA512);
#else
#include "sha2.h"
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#include "rawSHA512_common.h"
#ifdef _OPENMP
#include <omp.h>
#ifdef SIMD_COEF_64
#ifndef OMP_SCALE
#define OMP_SCALE 4096
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 8192
#endif
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "xsha512"
#define FORMAT_NAME "Mac OS X 10.7"
#define ALGORITHM_NAME "SHA512 " SHA512_ALGORITHM_NAME
#define PLAINTEXT_LENGTH 107
#define SALT_SIZE 4
#define SALT_ALIGN sizeof(ARCH_WORD_32)
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#if ARCH_BITS >= 64 || defined(__SSE2__)
/* 64-bitness happens to correlate with faster memcpy() */
#define PRECOMPUTE_CTX_FOR_SALT
#else
#undef PRECOMPUTE_CTX_FOR_SALT
#endif
#define BINARY_SIZE DIGEST_SIZE
#ifdef SIMD_COEF_64
/* GETPOS maps (byte offset i, candidate index) to the position of that byte
 * inside the interleaved, big-endian SIMD key buffer.  NOTE(review): the
 * layout depends on SHA_BUF_SIZ/SIMD_COEF_64 from simd-intrinsics.h --
 * confirm against that header. */
#define GETPOS(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + (7-((i)&7)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64*8 )
/* SIMD build: keys are stored pre-interleaved for SIMDSHA512body(). */
static ARCH_WORD_64 (*saved_key)[SHA_BUF_SIZ*MAX_KEYS_PER_CRYPT];
static ARCH_WORD_64 (*crypt_out);
/* Number of key slots allocated in init(); used by set_salt() to stamp the
 * salt into every slot. */
static int max_keys;
#else
/* Scalar build: one plaintext buffer and one digest per candidate key. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int (*saved_len);
static ARCH_WORD_32 (*crypt_out)[DIGEST_SIZE/sizeof(ARCH_WORD_32)];
#ifdef PRECOMPUTE_CTX_FOR_SALT
/* SHA-512 context pre-seeded with the 4-byte salt (see set_salt()). */
static SHA512_CTX ctx_salt;
#else
/* Raw 4-byte salt, hashed in at crypt time. */
static ARCH_WORD_32 saved_salt;
#endif
#endif
/* Allocate per-session buffers.  Key-buffer counts are scaled by the OpenMP
 * thread count times OMP_SCALE so each thread gets a batch of candidates. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_64
#ifndef _OPENMP
	int omp_t = 1;
#endif
	/* One SIMD-interleaved key block per thread batch, SIMD-aligned. */
	saved_key = mem_calloc_align(omp_t, sizeof(*saved_key), MEM_ALIGN_SIMD);
	/* 8 64-bit words per SHA-512 digest. */
	crypt_out = mem_calloc_align(self->params.max_keys_per_crypt,
	    8 * sizeof(ARCH_WORD_64), MEM_ALIGN_SIMD);
	max_keys = self->params.max_keys_per_crypt;
#else
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	    sizeof(*saved_key));
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	    sizeof(*saved_len));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	    sizeof(*crypt_out));
#endif
}
/* Release the buffers allocated by init().  saved_len only exists in the
 * scalar (non-SIMD) build. */
static void done(void)
{
	MEM_FREE(crypt_out);
#ifndef SIMD_COEF_64
	MEM_FREE(saved_len);
#endif
	MEM_FREE(saved_key);
}
/* Decode the hex digits that follow the XSHA512 tag into the 4 raw salt
 * bytes.  Returns a pointer to a static, word-aligned buffer. */
static void *get_salt(char *ciphertext)
{
	static union {
		unsigned char c[SALT_SIZE];
		ARCH_WORD_32 dummy;
	} buf;
	char *hex = ciphertext + XSHA512_TAG_LENGTH;
	int i;

	for (i = 0; i < SALT_SIZE; i++) {
		buf.c[i] = (atoi16[ARCH_INDEX(hex[0])] << 4) |
		            atoi16[ARCH_INDEX(hex[1])];
		hex += 2;
	}
	return buf.c;
}
#ifdef SIMD_COEF_64
/* Index of candidate `index`'s first digest word inside the interleaved
 * SIMD output buffer (the macro reads a variable named `index`). */
#define HASH_IDX (((unsigned int)index&(SIMD_COEF_64-1))+(unsigned int)index/SIMD_COEF_64*8*SIMD_COEF_64)
/* Bucket-hash accessors: successively wider masks of the first digest word. */
static int get_hash_0 (int index) { return crypt_out[HASH_IDX] & PH_MASK_0; }
static int get_hash_1 (int index) { return crypt_out[HASH_IDX] & PH_MASK_1; }
static int get_hash_2 (int index) { return crypt_out[HASH_IDX] & PH_MASK_2; }
static int get_hash_3 (int index) { return crypt_out[HASH_IDX] & PH_MASK_3; }
static int get_hash_4 (int index) { return crypt_out[HASH_IDX] & PH_MASK_4; }
static int get_hash_5 (int index) { return crypt_out[HASH_IDX] & PH_MASK_5; }
static int get_hash_6 (int index) { return crypt_out[HASH_IDX] & PH_MASK_6; }
#else
/* Scalar build: digests are stored per-candidate, mask the first word. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
#endif
static int salt_hash(void *salt)
{
return *(ARCH_WORD_32 *)salt & (SALT_HASH_SIZE - 1);
}
/* Install the 4-byte salt for the next crypt_all() batch.
 * Scalar build: either pre-hash it into ctx_salt or just remember it.
 * SIMD build: write the salt bytes into positions 0..3 of every key slot,
 * since the SIMD buffers hold salt||password pre-laid-out. */
static void set_salt(void *salt)
{
#ifndef SIMD_COEF_64
#ifdef PRECOMPUTE_CTX_FOR_SALT
	SHA512_Init(&ctx_salt);
	SHA512_Update(&ctx_salt, salt, SALT_SIZE);
#else
	saved_salt = *(ARCH_WORD_32 *)salt;
#endif
#else
	int i;
	unsigned char *wucp = (unsigned char*)saved_key;
	/* Stamp the salt into every allocated key slot. */
	for (i = 0; i < max_keys; ++i) {
		wucp[GETPOS(0, i)] = ((char*)salt)[0];
		wucp[GETPOS(1, i)] = ((char*)salt)[1];
		wucp[GETPOS(2, i)] = ((char*)salt)[2];
		wucp[GETPOS(3, i)] = ((char*)salt)[3];
	}
#endif
}
/* Store candidate password `key` in slot `index`.
 * Scalar build: plain bounded copy plus length bookkeeping.
 * SIMD build: lay the password out after the 4 salt bytes already placed by
 * set_salt(), byte-swapped into big-endian 64-bit lanes, append the 0x80
 * SHA-512 padding byte, clear stale bytes from any longer previous key, and
 * record the bit length in the final message word. */
static void set_key(char *key, int index)
{
#ifndef SIMD_COEF_64
	int length = strlen(key);
	if (length > PLAINTEXT_LENGTH)
		length = PLAINTEXT_LENGTH;
	saved_len[index] = length;
	memcpy(saved_key[index], key, length);
#else
	/* First 64-bit lane of this candidate's interleaved message block. */
	ARCH_WORD_64 *keybuffer = &((ARCH_WORD_64 *)saved_key)[(index&(SIMD_COEF_64-1)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64];
	ARCH_WORD_64 *keybuf_word = keybuffer;
	unsigned int len;
	ARCH_WORD_64 temp;
	unsigned char *wucp = (unsigned char*)saved_key;
	// ok, first 4 bytes (if there are that many or more), we handle one offs.
	// this is because we already have 4 byte salt loaded into our saved_key.
	// IF there are more bytes of password, we drop into the multi loader.
#if ARCH_ALLOWS_UNALIGNED
	const ARCH_WORD_64 *wkey = (ARCH_WORD_64*)&(key[4]);
#else
	/* Platforms that fault on unaligned loads get an aligned bounce buffer. */
	char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint64_t));
	const ARCH_WORD_64 *wkey = is_aligned(key + 4, sizeof(uint64_t)) ?
		(ARCH_WORD_64*)(key + 4) : (ARCH_WORD_64*)buf_aligned;
	if ((char *)wkey == buf_aligned && strlen(key) >= 4)
		strcpy(buf_aligned, key + 4);
#endif
	/* len counts salt (4) plus password bytes consumed so far. */
	len = 4;
	/* Byte-at-a-time for the first 4 password bytes: if the NUL appears
	 * here, place the 0x80 pad, zero the rest of the word, and skip the
	 * word loader entirely. */
	if (key[0] == 0) {wucp[GETPOS(4, index)] = 0x80; wucp[GETPOS(5, index)] = wucp[GETPOS(6, index)] = wucp[GETPOS(7, index)] = 0; goto key_cleaning; }
	wucp[GETPOS(4, index)] = key[0];
	++len;
	if (key[1] == 0) {wucp[GETPOS(5, index)] = 0x80; wucp[GETPOS(6, index)] = wucp[GETPOS(7, index)] = 0; goto key_cleaning; }
	wucp[GETPOS(5, index)] = key[1];
	++len;
	if (key[2] == 0) {wucp[GETPOS(6, index)] = 0x80; wucp[GETPOS(7, index)] = 0; goto key_cleaning; }
	wucp[GETPOS(6, index)] = key[2];
	++len;
	if (key[3] == 0) {wucp[GETPOS(7, index)] = 0x80; goto key_cleaning; }
	wucp[GETPOS(7, index)] = key[3];
	++len;
	keybuf_word += SIMD_COEF_64;
	/* 8 bytes at a time: find the terminating NUL inside the word, merge
	 * the 0x80 pad into the same word, byte-swap to big-endian, store. */
	while((unsigned char)(temp = *wkey++)) {
		if (!(temp & 0xff00))
		{
			*keybuf_word = JOHNSWAP64((temp & 0xff) | (0x80 << 8));
			len++;
			goto key_cleaning;
		}
		if (!(temp & 0xff0000))
		{
			*keybuf_word = JOHNSWAP64((temp & 0xffff) | (0x80 << 16));
			len+=2;
			goto key_cleaning;
		}
		if (!(temp & 0xff000000))
		{
			*keybuf_word = JOHNSWAP64((temp & 0xffffff) | (0x80ULL << 24));
			len+=3;
			goto key_cleaning;
		}
		if (!(temp & 0xff00000000ULL))
		{
			*keybuf_word = JOHNSWAP64((temp & 0xffffffff) | (0x80ULL << 32));
			len+=4;
			goto key_cleaning;
		}
		if (!(temp & 0xff0000000000ULL))
		{
			*keybuf_word = JOHNSWAP64((temp & 0xffffffffffULL) | (0x80ULL << 40));
			len+=5;
			goto key_cleaning;
		}
		if (!(temp & 0xff000000000000ULL))
		{
			*keybuf_word = JOHNSWAP64((temp & 0xffffffffffffULL) | (0x80ULL << 48));
			len+=6;
			goto key_cleaning;
		}
		if (!(temp & 0xff00000000000000ULL))
		{
			*keybuf_word = JOHNSWAP64((temp & 0xffffffffffffffULL) | (0x80ULL << 56));
			len+=7;
			goto key_cleaning;
		}
		*keybuf_word = JOHNSWAP64(temp);
		len += 8;
		keybuf_word += SIMD_COEF_64;
	}
	/* Password ended exactly on a word boundary: pad byte starts a word. */
	*keybuf_word = 0x8000000000000000ULL;
key_cleaning:
	/* Zero trailing words left over from a previous, longer key. */
	keybuf_word += SIMD_COEF_64;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_64;
	}
	/* SHA-512 message length field, in bits (word 15 of the block). */
	keybuffer[15*SIMD_COEF_64] = len << 3;
#endif
}
/* Return the plaintext stored in slot `index`.
 * Scalar build: NUL-terminate and return the saved buffer.
 * SIMD build: reconstruct the password by reading it back out of the
 * interleaved message block, skipping the leading salt bytes. */
static char *get_key(int index)
{
#ifndef SIMD_COEF_64
	saved_key[index][saved_len[index]] = 0;
	return saved_key[index];
#else
	static unsigned char key[PLAINTEXT_LENGTH+1];
	int i;
	unsigned char *wucp = (unsigned char*)saved_key;
	ARCH_WORD_64 *keybuffer = &((ARCH_WORD_64*)saved_key)[(index&(SIMD_COEF_64-1)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64];
	/* Length word holds bits of salt+password; convert back to password bytes. */
	int len = (keybuffer[15*SIMD_COEF_64] >> 3) - SALT_SIZE;
	for (i = 0; i < len; ++i)
		key[i] = wucp[GETPOS(SALT_SIZE + i, index)];
	key[i] = 0;
	return (char*)key;
#endif
}
/* Hash all `*pcount` queued candidates (SHA-512 over salt||password).
 * SIMD build advances MAX_KEYS_PER_CRYPT candidates per iteration; the
 * scalar build hashes one at a time, optionally resuming from the
 * salt-preseeded context. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
#ifdef _OPENMP
#ifndef SIMD_COEF_64
#ifdef PRECOMPUTE_CTX_FOR_SALT
#pragma omp parallel for default(none) private(index) shared(ctx_salt, saved_key, saved_len, crypt_out)
#else
#pragma omp parallel for default(none) private(index) shared(saved_salt, saved_key, saved_len, crypt_out)
#endif
#else
#pragma omp parallel for
#endif
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) {
#ifdef SIMD_COEF_64
		/* Keys were laid out by set_key(); hash one SIMD block. */
		SIMDSHA512body(&saved_key[index/MAX_KEYS_PER_CRYPT], &crypt_out[HASH_IDX], NULL, SSEi_MIXED_IN);
#else
		SHA512_CTX ctx;
#ifdef PRECOMPUTE_CTX_FOR_SALT
		/* Resume from the context already updated with the salt. */
		memcpy(&ctx, &ctx_salt, sizeof(ctx));
#else
		SHA512_Init(&ctx);
		SHA512_Update(&ctx, &saved_salt, SALT_SIZE);
#endif
		SHA512_Update(&ctx, saved_key[index], saved_len[index]);
		SHA512_Final((unsigned char *)(crypt_out[index]), &ctx);
#endif
	}
	return count;
}
/* Fast screen: does any computed digest share its first word with the
 * target binary?  (count is non-negative here, so the signed/unsigned
 * comparison against `index` is safe.) */
static int cmp_all(void *binary, int count)
{
	unsigned int index;
	for (index = 0; index < count; index++)
#ifdef SIMD_COEF_64
		if (((ARCH_WORD_64 *) binary)[0] == crypt_out[HASH_IDX])
#else
		if ( ((ARCH_WORD_32*)binary)[0] == crypt_out[index][0] )
#endif
			return 1;
	return 0;
}
/* Full comparison of candidate `index`'s digest against the target binary.
 * The SIMD build walks the interleaved digest words lane by lane. */
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_64
	int i;
	for (i = 0; i < BINARY_SIZE/sizeof(ARCH_WORD_64); i++)
		if (((ARCH_WORD_64*) binary)[i] != crypt_out[HASH_IDX + i*SIMD_COEF_64])
			return 0;
	return 1;
#else
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
#endif
}
/* Always reports a match: cmp_one() already compared the complete
 * BINARY_SIZE digest, so there is nothing left to verify here. */
static int cmp_exact(char *source, int index)
{
	(void)source;
	(void)index;
	return 1;
}
/* Format registration: wires the parameters and method table of the
 * xsha512 (Mac OS X 10.7) format into John the Ripper's plugin framework. */
struct fmt_main fmt_XSHA512 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		XSHA512_BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		sha512_common_tests_xsha512
	}, {
		init,
		done,
		fmt_default_reset,
		/* Hash parsing is shared with the other SHA-512 formats. */
		sha512_common_prepare_xsha512,
		sha512_common_valid_xsha512,
		sha512_common_split_xsha512,
		sha512_common_binary_xsha512,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
pr35438.c | /* PR c/35438 */
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */
void foo ();
#pragma omp threadprivate(foo) /* { dg-error "is not a variable" } */
|
cpd_omp.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <HiParTI.h>
#include <assert.h>
#include <math.h>
#ifdef HIPARTI_USE_MAGMA
#include "magma_v2.h"
#include "magma_lapack.h"
#else
#include "clapack.h"
#endif
#include "hicoo.h"
#ifdef HIPARTI_USE_OPENMP
/**
 * Run CPD-ALS iterations on a HiCOO sparse tensor with OpenMP.
 *
 * Alternates over all modes: MTTKRP, normal-equation solve, column
 * normalization (norms accumulated in lambda), and Gram-matrix update,
 * until `niters` iterations or the fit improvement drops below `tol`.
 *
 * Fixes vs. previous version:
 *  - tmp_timer was created with ptiNewTimer() but never freed (leak);
 *  - the individual ata[m] structs were malloc'd but only their contents
 *    were released (free(ata[m]) was missing);
 *  - removed set-but-unused timing locals and dead commented-out debug code.
 *
 * Returns the final fit value.
 */
double OmpCpdAlsStepHiCOO(
    ptiSparseTensorHiCOO const * const hitsr,
    ptiIndex const rank,
    ptiIndex const niters,
    double const tol,
    const int tk,
    const int tb,
    const int * par_iters,
    ptiRankMatrix ** mats,
    ptiRankMatrix *** copy_mats,
    ptiValue * const lambda,
    int balanced)
{
    ptiIndex const nmodes = hitsr->nmodes;
    ptiIndex const stride = mats[0]->stride;
    double fit = 0;

    omp_set_num_threads(tk);
#ifdef HIPARTI_USE_MAGMA
    magma_set_omp_numthreads(tk);
    magma_set_lapack_numthreads(tk);
#endif

    for(ptiIndex m=0; m < nmodes; ++m) {
        ptiAssert(hitsr->ndims[m] == mats[m]->nrows);
        ptiAssert(mats[m]->ncols == rank);
    }

    ptiValue alpha = 1.0, beta = 0.0;
    char notrans = 'N';
    char uplo = 'L';
    int blas_rank = (int) rank;
    int blas_stride = (int) stride;

    // mats[nmodes] is the scratch MTTKRP output matrix.
    ptiRankMatrix * tmp_mat = mats[nmodes];
    ptiRankMatrix ** ata = (ptiRankMatrix **)malloc((nmodes+1) * sizeof(*ata));
    for(ptiIndex m=0; m < nmodes+1; ++m) {
        ata[m] = (ptiRankMatrix *)malloc(sizeof(ptiRankMatrix));
        ptiAssert(ptiNewRankMatrix(ata[m], rank, rank) == 0);
        ptiAssert(mats[m]->stride == ata[m]->stride);
    }

    /* Gram matrices: ata[m] = mats[m]^T * mats[m].  Row-major mats make
     * this an A * A' syrk producing an upper triangle. */
    for(ptiIndex m=0; m < nmodes; ++m) {
        int blas_nrows = (int)(mats[m]->nrows);
        ssyrk_(&uplo, &notrans, &blas_rank, &blas_nrows, &alpha,
            mats[m]->values, &blas_stride, &beta, ata[m]->values, &blas_stride);
    }

    double oldfit = 0;
    ptiIndex * mats_order = (ptiIndex*)malloc(nmodes * sizeof(*mats_order));
    ptiTimer tmp_timer;
    ptiNewTimer(&tmp_timer, 0);

    for(ptiIndex it=0; it < niters; ++it) {
        ptiTimer timer;
        ptiNewTimer(&timer, 0);
        ptiStartTimer(timer);

        for(ptiIndex m=0; m < nmodes; ++m) {
            tmp_mat->nrows = mats[m]->nrows;

            /* Factor-matrix order: current mode first, then the rest. */
            mats_order[0] = m;
            for(ptiIndex i=1; i<nmodes; ++i)
                mats_order[i] = (m+i) % nmodes;

            ptiStartTimer(tmp_timer);
            if(par_iters[m] == 1) {
                ptiAssert (ptiOmpMTTKRPHiCOO_MatrixTiling_Scheduled_Reduce(hitsr, mats, copy_mats[m], mats_order, m, tk, tb, balanced) == 0);
            } else {
                ptiAssert (ptiOmpMTTKRPHiCOO_MatrixTiling_Scheduled(hitsr, mats, mats_order, m, tk, tb, balanced) == 0);
            }
            ptiStopTimer(tmp_timer);
            ptiPrintElapsedTime(tmp_timer, "MTTKRP");

            /* Copy MTTKRP result into mats[m], then solve the normal
             * equations (row-major, so solve the transposed system). */
            ptiStartTimer(tmp_timer);
#ifdef HIPARTI_USE_OPENMP
            #pragma omp parallel for num_threads(tk)
#endif
            for(ptiIndex i=0; i<mats[m]->nrows * stride; ++i)
                mats[m]->values[i] = tmp_mat->values[i];
            ptiAssert ( ptiRankMatrixSolveNormals(m, nmodes, ata, mats[m]) == 0 );
            ptiStopTimer(tmp_timer);

            /* Normalize mats[m] into lambda; 2-norm on the first
             * iteration, max-norm afterwards to avoid precision blowup. */
            ptiStartTimer(tmp_timer);
            if (it == 0 ) {
                ptiRankMatrix2Norm(mats[m], lambda);
            } else {
                ptiRankMatrixMaxNorm(mats[m], lambda);
            }
            ptiStopTimer(tmp_timer);

            /* Refresh the Gram matrix of the just-updated mode. */
            ptiStartTimer(tmp_timer);
            int blas_nrows = (int)(mats[m]->nrows);
            ssyrk_(&uplo, &notrans, &blas_rank, &blas_nrows, &alpha,
                mats[m]->values, &blas_stride, &beta, ata[m]->values, &blas_stride);
            ptiStopTimer(tmp_timer);
        } // Loop nmodes

        ptiStartTimer(tmp_timer);
        fit = KruskalTensorFitHiCOO(hitsr, lambda, mats, ata);
        ptiStopTimer(tmp_timer);

        ptiStopTimer(timer);
        double its_time = ptiElapsedTime(timer);
        ptiFreeTimer(timer);

        printf(" its = %3u ( %.3lf s ) fit = %0.5f delta = %+0.4e\n",
            it+1, its_time, fit, fit - oldfit);
        if(it > 0 && fabs(fit - oldfit) < tol) {
            break;
        }
        oldfit = fit;
    } // Loop niters

    GetRankFinalLambda(rank, nmodes, mats, lambda);

    // Fix: previously leaked.
    ptiFreeTimer(tmp_timer);
    for(ptiIndex m=0; m < nmodes+1; ++m) {
        ptiFreeRankMatrix(ata[m]);
        free(ata[m]);   // Fix: the struct itself was previously leaked.
    }
    free(ata);
    free(mats_order);

    return fit;
}
/**
 * Driver for OpenMP CPD-ALS on a HiCOO sparse tensor.
 *
 * Allocates and randomizes the factor matrices, decides per-mode whether
 * the MTTKRP should use the privatized-reduction variant (par_iters),
 * runs OmpCpdAlsStepHiCOO(), and hands the factor matrices to `ktensor`
 * (ownership of mats[0..nmodes-1] transfers to ktensor->factors).
 *
 * Fix: par_iters was malloc'd but never freed (leak).
 *
 * Returns 0 on success (failures abort via ptiAssert).
 */
int ptiOmpCpdAlsHiCOO(
    ptiSparseTensorHiCOO const * const hitsr,
    ptiIndex const rank,
    ptiIndex const niters,
    double const tol,
    const int tk,
    const int tb,
    int balanced,
    ptiRankKruskalTensor * ktensor)
{
    ptiIndex nmodes = hitsr->nmodes;
#ifdef HIPARTI_USE_MAGMA
    magma_init();
#endif

    /* Initialize factor matrices; the extra slot [nmodes] is scratch for
     * MTTKRP output and is sized for the largest mode. */
    ptiIndex max_dim = 0;
    for(ptiIndex m=0; m < nmodes; ++m) {
        max_dim = (hitsr->ndims[m] > max_dim) ? hitsr->ndims[m] : max_dim;
    }
    ptiRankMatrix ** mats = (ptiRankMatrix **)malloc((nmodes+1) * sizeof(*mats));
    for(ptiIndex m=0; m < nmodes+1; ++m) {
        mats[m] = (ptiRankMatrix *)malloc(sizeof(ptiRankMatrix));
    }
    for(ptiIndex m=0; m < nmodes; ++m) {
        ptiAssert(ptiNewRankMatrix(mats[m], hitsr->ndims[m], rank) == 0);
        ptiAssert(ptiRandomizeRankMatrix(mats[m], hitsr->ndims[m], rank) == 0);
    }
    ptiAssert(ptiNewRankMatrix(mats[nmodes], max_dim, rank) == 0);
    ptiAssert(ptiConstantRankMatrix(mats[nmodes], 0) == 0);

    /* Decide per mode whether to parallelize with privatized reduction:
     * few kernel rows but many iterations per row favors the reduce path. */
    int * par_iters = (int *)malloc(nmodes * sizeof(*par_iters));
    ptiIndex sk = (ptiIndex)pow(2, hitsr->sk_bits);
    for(ptiIndex m=0; m < nmodes; ++m) {
        par_iters[m] = 0;
        ptiIndex num_kernel_dim = (hitsr->ndims[m] + sk - 1) / sk;
        if(num_kernel_dim <= PAR_MIN_DEGREE * NUM_CORES && hitsr->nkiters[m] / num_kernel_dim >= PAR_DEGREE_REDUCE) {
            par_iters[m] = 1;
        }
    }
    printf("par_iters:\n");
    for(ptiIndex m=0; m < nmodes; ++m) {
        printf("%d, ", par_iters[m]);
    }
    printf("\n");

    /* Per-thread factor-matrix copies for the reduce variant. */
    ptiRankMatrix *** copy_mats = (ptiRankMatrix ***)malloc(nmodes * sizeof(*copy_mats));
    for(ptiIndex m=0; m < nmodes; ++m) {
        if (par_iters[m] == 1) {
            copy_mats[m] = (ptiRankMatrix **)malloc(tk * sizeof(ptiRankMatrix*));
            for(int t=0; t<tk; ++t) {
                copy_mats[m][t] = (ptiRankMatrix *)malloc(sizeof(ptiRankMatrix));
                ptiAssert(ptiNewRankMatrix(copy_mats[m][t], hitsr->ndims[m], rank) == 0);
                ptiAssert(ptiConstantRankMatrix(copy_mats[m][t], 0) == 0);
            }
        }
    }

    ptiTimer timer;
    ptiNewTimer(&timer, 0);
    ptiStartTimer(timer);
    ktensor->fit = OmpCpdAlsStepHiCOO(hitsr, rank, niters, tol, tk, tb, par_iters, mats, copy_mats, ktensor->lambda, balanced);
    ptiStopTimer(timer);
    ptiPrintElapsedTime(timer, "CPU HiCOO SpTns CPD-ALS");
    ptiFreeTimer(timer);

    // Ownership of mats transfers to the Kruskal tensor.
    ktensor->factors = mats;

#ifdef HIPARTI_USE_MAGMA
    magma_finalize();
#endif

    /* Release the scratch matrix contents.  NOTE(review): the mats[nmodes]
     * struct itself is still reachable through ktensor->factors, so its
     * pointer is intentionally not freed here -- confirm against the
     * ktensor destructor before changing. */
    ptiFreeRankMatrix(mats[nmodes]);
    for(ptiIndex m=0; m < nmodes; ++m) {
        if(par_iters[m] == 1) {
            for(int t=0; t<tk; ++t) {
                ptiFreeRankMatrix(copy_mats[m][t]);
                free(copy_mats[m][t]);
            }
            free(copy_mats[m]);
        }
    }
    free(copy_mats);
    free(par_iters);  // Fix: previously leaked.

    return 0;
}
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.