source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
high.c | #include "high.h"
#include "sicm_low.h"
#include <fcntl.h>
#include <numa.h>
#include <semaphore.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <stdio.h>
#include "sicmimpl.h"
/* One contiguous piece of a (possibly split) allocation, together with the
 * SICM device it was placed on. `sz` is the byte size of this piece. */
struct suballoc_t {
void* ptr;
struct sicm_device* device;
size_t sz;
};
/* One tracked allocation: the user-visible pointer plus `count` suballocs.
 * A NULL `ptr` marks an empty slot in the hash table below. */
struct allocation_t {
void* ptr;
size_t count;
struct suballoc_t* suballocs;
};
/* Open-addressing (linear probing) hash table of live allocations. */
struct alloc_table_t {
size_t used, capacity;
struct allocation_t* data;
};
/* Global allocation table; guarded by `sem` + the omp critical sections. */
struct alloc_table_t alloc_table;
/* Devices sorted by performance preference (see compare_perf). */
struct sicm_device_list sg_performance_list;
/* Devices sorted by capacity preference (see compare_cap). */
struct sicm_device_list sg_capacity_list;
/* Named POSIX semaphore serializing table/device access across processes. */
sem_t* sem;
void add_allocation(void* ptr, struct suballoc_t* suballocs, size_t count) {
size_t k;
alloc_table.used++;
if(100 * alloc_table.used / alloc_table.capacity >= 80) {
struct allocation_t* old_data = alloc_table.data;
size_t old_capacity = alloc_table.capacity;
alloc_table.capacity *= 2;
alloc_table.used = 0;
alloc_table.data = malloc(alloc_table.capacity * sizeof(struct alloc_table_t));
for(k = 0; k < alloc_table.capacity; k++) alloc_table.data[k].ptr = NULL;
for(k = 0; k < old_capacity; k++)
if(old_data[k].ptr != NULL)
add_allocation(old_data[k].ptr, old_data[k].suballocs, old_data[k].count);
free(old_data);
}
k = sicm_hash((size_t)ptr) % alloc_table.capacity;
while(1) {
if(alloc_table.data[k].ptr == NULL) {
alloc_table.data[k].ptr = ptr;
alloc_table.data[k].count = count;
alloc_table.data[k].suballocs = suballocs;
break;
}
k = (k + 1) % alloc_table.capacity;
}
}
/* Look up the allocation record for `ptr` by linear probing from its hash
 * bucket. Returns NULL after a full wrap-around without a match. */
struct allocation_t* get_allocation(void* ptr) {
size_t start = sicm_hash((size_t)ptr) % alloc_table.capacity;
size_t slot = start;
do {
if(alloc_table.data[slot].ptr == ptr)
return &alloc_table.data[slot];
slot = (slot + 1) % alloc_table.capacity;
} while(slot != start);
return NULL;
}
/* Remove `ptr` from the table, freeing its suballoc array. Searching wraps
 * once around the table; an absent pointer is a silent no-op. Lookups still
 * work after removal because get_allocation probes the whole table rather
 * than stopping at the first empty slot. */
void remove_allocation(void* ptr) {
size_t k = sicm_hash((size_t)ptr) % alloc_table.capacity;
size_t initial_k = k;
while(1) {
if(alloc_table.data[k].ptr == ptr) {
alloc_table.data[k].ptr = NULL;
alloc_table.data[k].count = 0;
free(alloc_table.data[k].suballocs);
alloc_table.data[k].suballocs = NULL;
/* BUG FIX: only decrement when the entry was actually found; the old
 * code decremented unconditionally and could corrupt the load factor
 * (and underflow the unsigned counter) on a missed free. */
alloc_table.used--;
break;
}
k = (k + 1) % alloc_table.capacity;
if(k == initial_k) break;
}
}
/* Ordering predicate for the performance-preference list. Near devices sort
 * before far ones; among near devices KNL HBM is preferred (then larger page
 * size); among far devices DRAM is preferred so scarce far HBM is not burned
 * (then larger page size). Returns -1 when `a` should sort before `b`, else 1. */
int compare_perf(struct sicm_device* a, struct sicm_device* b) {
int a_near = sicm_is_near(a);
int b_near = sicm_is_near(b);
if(a_near && !b_near) return -1;
if(!a_near && b_near) return 1;
if(a_near) {
if(a->tag == SICM_KNL_HBM && b->tag != SICM_KNL_HBM) return -1;
if(a->tag == SICM_KNL_HBM) { // b is also KNL HBM
if(a->data.knl_hbm.page_size > b->data.knl_hbm.page_size) return -1;
return 1;
}
if(b->tag == SICM_KNL_HBM) return 1; // a is not KNL HBM
// at this point neither a nor b is KNL HBM; compare as DRAM
if(a->data.dram.page_size > b->data.dram.page_size) return -1;
return 1;
}
else {
// If we have to go to a far node, we want reverse preferences (i.e., DO NOT
// allocate on a performant far node)
if(a->tag == SICM_DRAM && b->tag != SICM_DRAM) return -1;
if(a->tag == SICM_DRAM) { // b is also DRAM
if(a->data.dram.page_size > b->data.dram.page_size) return -1;
return 1;
}
if(b->tag == SICM_DRAM) return 1; // a is not DRAM
// at this point neither a nor b is DRAM; compare as KNL HBM
if(a->data.knl_hbm.page_size > b->data.knl_hbm.page_size) return -1;
return 1;
}
return 0; // unreachable: both branches above always return
}
/* Ordering predicate for the capacity-preference list: near before far,
 * DRAM before KNL HBM, then larger page size first. Returns -1 when `a`
 * should sort before `b`, else 1. */
int compare_cap(struct sicm_device* a, struct sicm_device* b) {
int near_a = sicm_is_near(a);
int near_b = sicm_is_near(b);
if(near_a != near_b)
return near_a ? -1 : 1;
int dram_a = (a->tag == SICM_DRAM);
int dram_b = (b->tag == SICM_DRAM);
if(dram_a != dram_b)
return dram_a ? -1 : 1;
/* Same tag class: larger page size wins. */
if(dram_a)
return (a->data.dram.page_size > b->data.dram.page_size) ? -1 : 1;
return (a->data.knl_hbm.page_size > b->data.knl_hbm.page_size) ? -1 : 1;
}
/* Sort a device list in place with an iterative quicksort; `cmp` returns -1
 * when its first argument must come first. The explicit stack holds pairs
 * of (low, high) indices; only subranges of >= 2 elements are pushed, so at
 * most count/2 ranges (count ints) are live at once. */
void sort_list(struct sicm_device_list* list, int (*cmp)(struct sicm_device*, struct sicm_device*)) {
/* BUG FIX: also bail out on an empty list; the old code only skipped
 * count == 1 and would access devices[0] and devices[-1] for count == 0. */
if(list->count <= 1) {
return;
}
/* This is the iterative version, so we need an explicit stack. */
int* stack = malloc(list->count * sizeof(int));
if(stack == NULL) return; /* out of memory: leave the list unsorted */
int top = -1;
stack[++top] = 0;
stack[++top] = list->count - 1;
int h, l;
while(top >= 0) {
h = stack[top--];
l = stack[top--];
// Partition the list and move the pivot to the right place.
// The pivot is list->devices[h] (Lomuto partition scheme).
struct sicm_device swap;
int i = l - 1;
int j;
for(j = l; j < h; j++) {
if(cmp(&list->devices[j], &list->devices[h]) == -1) {
i++;
swap = list->devices[i];
list->devices[i] = list->devices[j];
list->devices[j] = swap;
}
}
swap = list->devices[i+1];
list->devices[i+1] = list->devices[h];
list->devices[h] = swap;
// Set up the "recursive call": the pivot is now at location i + 1.
// Push the left subrange if it has at least two devices.
if(i > l) {
stack[++top] = l;
stack[++top] = i;
}
// Push the right subrange if it has at least two devices.
if(i+2 < h) {
stack[++top] = i + 2;
stack[++top] = h;
}
}
free(stack);
}
/* Accepts an allocation site ID and a size, does the allocation.
 * Devices are tried in performance order; the first one with enough free
 * space that successfully allocates wins. The new mapping is touched one
 * byte per page to force physical backing, then recorded in the global
 * table. Returns NULL if no device could satisfy the request. */
void* sg_alloc_exact(int id, size_t sz) {
void* ptr = NULL;
#pragma omp critical(sicm_greedy)
{
sem_wait(sem);
printf("Allocating to id %d\n", id);
int i;
size_t j;
for(i = 0; i < sg_performance_list.count; i++) {
struct sicm_device* device = &sg_performance_list.devices[i];
if(sicm_avail(device) * 1024 >= sz) { /* sicm_avail reports KiB */
ptr = sicm_device_alloc(device, sz);
/* BUG FIX: the device allocation can still fail even when space
 * looked available; don't dereference or record a NULL mapping —
 * fall through to the next device instead. */
if(ptr == NULL) continue;
int page_size = sicm_device_page_size(device);
/* BUG FIX: page size is an int that can be -1; the old code stored
 * it straight into a size_t, turning -1 into SIZE_MAX and defeating
 * the `> 0` guard (with overflowing index arithmetic to match). */
if(page_size > 0) {
size_t step = (size_t)page_size * 1024; // page size is reported in KiB
for(j = 0; j < sz; j += step) ((char*)ptr)[j] = 0;
}
struct suballoc_t* suballoc = malloc(sizeof(struct suballoc_t));
suballoc->ptr = ptr;
suballoc->device = device;
suballoc->sz = sz;
add_allocation(ptr, suballoc, 1);
break;
}
}
sem_post(sem);
}
return ptr;
}
/* Free an allocation previously created by sg_alloc_exact: releases every
 * suballocation back to its device and drops the table entry. Unknown
 * pointers are reported but otherwise ignored. */
void sg_free(void* ptr) {
#pragma omp critical(sicm_greedy)
{
sem_wait(sem);
printf("Freeing.\n");
struct allocation_t* a = get_allocation(ptr);
if(a) {
/* BUG FIX: `count` is a size_t; an int loop index would be compared
 * signed-vs-unsigned and could not cover the full range. */
size_t i;
for(i = 0; i < a->count; i++)
sicm_device_free(a->suballocs[i].device, a->suballocs[i].ptr, a->suballocs[i].sz);
remove_allocation(ptr);
}
else {
printf("failed to free\n");
}
sem_post(sem);
}
}
__attribute__((constructor))
void sg_init() {
sem = sem_open("/sg_sem", O_CREAT | O_RDWR, 0644, 1);
printf("Initializing...\n");
int i, j;
int node_count = numa_max_node() + 1;
struct bitmask* cpumask = numa_allocate_cpumask();
int cpu_count = numa_num_possible_cpus();
int* compute_nodes = malloc(cpu_count * sizeof(int));
int compute_node_count = 0;
for(i = 0; i < node_count; i++) {
numa_node_to_cpus(i, cpumask);
for(j = 0; j < cpu_count; j++) {
if(numa_bitmask_isbitset(cpumask, j)) {
compute_nodes[compute_node_count] = i;
compute_node_count++;
break;
}
}
}
numa_free_cpumask(cpumask);
/* numa_run_on_node(compute_nodes[id % compute_node_count]); */
free(compute_nodes);
alloc_table.used = 0;
alloc_table.capacity = 32;
alloc_table.data = malloc(alloc_table.capacity * sizeof(struct alloc_table_t));
for(i = 0; i < 32; i++) alloc_table.data[i].ptr = NULL;
sg_performance_list = sicm_init();
int p = 0;
for(i = 0; i < sg_performance_list.count; i++) {
int page_size = sicm_device_page_size(&sg_performance_list.devices[i]);
if(page_size == -1 || page_size == normal_page_size) {
sg_performance_list.devices[p++] = sg_performance_list.devices[i];
}
}
sg_performance_list.devices = realloc(sg_performance_list.devices, p * sizeof(struct sicm_device));
sg_performance_list.count = p;
sg_capacity_list = (struct sicm_device_list){
.devices = malloc(sg_performance_list.count * sizeof(struct sicm_device)),
.count = sg_performance_list.count
};
// Sort the performance list first, since that's an okay ordering for the
// capacity list
sort_list(&sg_performance_list, compare_perf);
for(i = 0; i < sg_performance_list.count; i++)
sg_capacity_list.devices[i] = sg_performance_list.devices[i];
sort_list(&sg_capacity_list, compare_cap);
}
__attribute__((destructor))
/* Library destructor: release the device lists and allocation table built
 * in sg_init() and close our handle on the named semaphore. */
void sg_terminate() {
printf("Terminating.\n");
/* Tear down in reverse order of creation; the frees are independent. */
free(alloc_table.data);
free(sg_capacity_list.devices);
free(sg_performance_list.devices);
sem_close(sem);
}
|
4848.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "covariance.h"
/* Array initialization: sets the normalization constant *float_n and fills
 * data[i][j] = (i*j)/M.
 * NOTE(review): the loops use the compile-time bounds M and N rather than
 * the runtime parameters m and n; this matches standard PolyBench usage
 * where m == M and n == N — confirm if the sizes ever diverge. */
static
void init_array (int m, int n,
DATA_TYPE *float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
int i, j;
*float_n = 1.2;
for (i = 0; i < M; i++)
for (j = 0; j < N; j++)
data[i][j] = ((DATA_TYPE) i*j) / M;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
/* Print the m x m covariance matrix to stderr, one value at a time with a
 * newline every 20 values — scanning all live-out data prevents the
 * compiler from dead-code-eliminating the kernel. */
void print_array(int m,
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
int i, j;
for (i = 0; i < m; i++)
for (j = 0; j < m; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]);
if ((i * m + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
   including the call and return.
   Computes the m x m covariance matrix of `data` (n samples of m columns):
   column means, mean-centering, then symmat[j1][j2] = sum_i d[i][j1]*d[i][j2].
   The three phases run inside one parallel region; the implicit barrier at
   the end of each `omp for` orders them (mean must be complete before
   centering, centering before the products). */
static
void kernel_covariance(int m, int n,
DATA_TYPE float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
DATA_TYPE POLYBENCH_1D(mean,M,m))
{
int i, j, j1, j2;
#pragma scop
/* Determine mean of column vectors of input data matrix */
#pragma omp parallel private(i, j, j2) num_threads(2)
{
#pragma omp for schedule(static, 8)
for (j = 0; j < _PB_M; j++)
{
mean[j] = 0.0;
for (i = 0; i < _PB_N; i++)
mean[j] += data[i][j];
mean[j] /= float_n;
}
/* Center the column vectors. */
#pragma omp for schedule(static, 8)
for (i = 0; i < _PB_N; i++)
for (j = 0; j < _PB_M; j++)
data[i][j] -= mean[j];
/* Calculate the m * m covariance matrix (symmetric: only the upper
   triangle is computed, then mirrored). */
#pragma omp for schedule(static, 8)
for (j1 = 0; j1 < _PB_M; j1++)
for (j2 = j1; j2 < _PB_M; j2++)
{
symmat[j1][j2] = 0.0;
for (i = 0; i < _PB_N; i++)
symmat[j1][j2] += data[i][j1] * data[i][j2];
symmat[j2][j1] = symmat[j1][j2];
}
}
#pragma endscop
}
/* Benchmark driver: allocate the PolyBench arrays, initialize, time the
 * covariance kernel, and print live-out data to defeat DCE. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int m = M;
/* Variable declaration/allocation. */
DATA_TYPE float_n;
POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
/* Initialize array(s). */
init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_covariance (m, n, float_n,
POLYBENCH_ARRAY(data),
POLYBENCH_ARRAY(symmat),
POLYBENCH_ARRAY(mean));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(data);
POLYBENCH_FREE_ARRAY(symmat);
POLYBENCH_FREE_ARRAY(mean);
return 0;
}
|
sink-2.c | /* { dg-do compile } */
void bar (int *);
void
/* GCC torture/compile test for OpenMP doacross loops: exercises
 * `ordered(1)` with a sink dependence on the previous iteration and a
 * source annotation. Compile-only — the exact shape (including the unused
 * `j`) is part of the testcase and must not be "cleaned up". */
foo ()
{
int i,j;
#pragma omp parallel for ordered(1)
for (i=0; i < 100; ++i)
{
#pragma omp ordered depend(sink:i-1)
bar(&i);
#pragma omp ordered depend(source)
}
}
|
lstm_bwd.c | #include <libxsmm.h>
#include <libxsmm_intrinsics_x86.h>
#if defined(LIBXSMM_OFFLOAD_TARGET)
# pragma offload_attribute(push,target(LIBXSMM_OFFLOAD_TARGET))
#endif
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#if defined(_OPENMP)
# include <omp.h>
#endif
#include "lstm_bwd.h"
#if defined(LIBXSMM_OFFLOAD_TARGET)
# pragma offload_attribute(pop)
#endif
#define CHKERR_LIBXSMM_DNN(A) if ( A != LIBXSMM_DNN_SUCCESS ) fprintf(stderr, "%s\n", libxsmm_dnn_get_error(A) );
#if 0
# define PRINT_LAYOUT2(DESC, LAYOUT) print_layout2(DESC, LAYOUT)
#else
# define PRINT_LAYOUT2(DESC, LAYOUT)
#endif
/* Debug helper (enabled via the PRINT_LAYOUT2 macro): print a tensor
 * layout's format, custom format, tensor type and its dimensions from
 * outermost to innermost. */
void print_layout2(char *desc, libxsmm_dnn_tensor_datalayout *layout) {
/* Names indexed by the layout's dim_type values; `static const` so the
 * table is emitted once instead of rebuilt on every call. */
static const char *dim_name[] = {"N", "H", "W", "C", "K", "R", "S", "X", "RLM", "RLK", "RLN"};
int i;
printf("%s: F:%d IF:%d TT: %d [", desc, layout->format, layout->custom_format, layout->tensor_type);
for(i = layout->num_dims - 1; i >= 0; i--) {
/* NOTE(review): assumes every dim_type value is < 11 (the table size);
 * verify against libxsmm's dimtype enum if new entries appear. */
printf("%s:%d%s", dim_name[layout->dim_type[i]], layout->dim_size[i], i == 0 ? "" : ", ");
}
printf("]\n");
}
/* Set the first `size` elements of a float buffer to zero, in parallel
 * when compiled with OpenMP. */
void zero_buf(float* buf, size_t size) {
/* BUG FIX: the old `int` index truncated sizes above INT_MAX. A signed
 * 64-bit index covers any practical buffer while remaining a valid
 * OpenMP canonical-loop variable. */
long long i;
#if defined(_OPENMP)
# pragma omp parallel for private(i)
#endif
for (i = 0; i < (long long)size; ++i) {
buf[i] = 0.0f;
}
}
/* Create and configure a LIBXSMM LSTM cell handle for the backward/weight-
 * update pass: builds the cell descriptor, links every caller-owned buffer
 * as a LIBXSMM tensor, binds the tensors to the handle, and allocates and
 * binds the scratch buffer. Returns the handle as an opaque void* for use
 * with lstm_bwd_set_ptr / lstm_bwd_execute_* / lstm_bwd_destroy.
 * The caller retains ownership of all data buffers.
 * NOTE(review): the N/C/K and NULL-pointer checks below only print a
 * diagnostic and fall through — execution continues with the bad values.
 * NOTE(review): the scratch pointer allocated at the end is bound to the
 * handle but never stored or freed by this module — confirm it is released
 * elsewhere (it is not freed in lstm_bwd_destroy's visible code). */
void* lstm_bwd_create( int N, /* minibatch size */
int C, /* input size */
int K, /* output size */
int t, /* timesteps = 1 */
int nThreads, /* number of threads */
const int w_in_kcck,
const int w_in_trans,
const float *xt,
const float *csp,
const float *hp,
const float *ht,
const float *w,
const float *r,
const float *cst,
const float *it,
const float *ft,
const float *ot,
const float *cit,
const float *cot,
const float *dcs,
const float *dht,
float *dxt,
float *dcspt,
float *dhpt,
float *dw,
float *dr,
float *db )
{
libxsmm_dnn_rnncell_desc lstmcell_desc;
libxsmm_dnn_rnncell* libxsmm_handle;
libxsmm_dnn_tensor* libxsmm_input;
libxsmm_dnn_tensor* libxsmm_cs_prev;
libxsmm_dnn_tensor* libxsmm_hidden_state_prev;
libxsmm_dnn_tensor* libxsmm_hidden_state = NULL;
libxsmm_dnn_tensor* libxsmm_weight;
libxsmm_dnn_tensor* libxsmm_recur_weight;
libxsmm_dnn_tensor* libxsmm_cs;
libxsmm_dnn_tensor* libxsmm_i;
libxsmm_dnn_tensor* libxsmm_f;
libxsmm_dnn_tensor* libxsmm_o;
libxsmm_dnn_tensor* libxsmm_ci;
libxsmm_dnn_tensor* libxsmm_co;
libxsmm_dnn_tensor* libxsmm_dinput;
libxsmm_dnn_tensor* libxsmm_dcs_prev;
libxsmm_dnn_tensor* libxsmm_dhidden_state_prev;
libxsmm_dnn_tensor* libxsmm_dweight;
libxsmm_dnn_tensor* libxsmm_drecur_weight;
libxsmm_dnn_tensor* libxsmm_dbias;
libxsmm_dnn_tensor* libxsmm_dcs;
libxsmm_dnn_tensor* libxsmm_dhidden_state;
libxsmm_dnn_tensor_datalayout* libxsmm_layout;
libxsmm_dnn_err_t status;
/* Argument validation (diagnostic only; does not abort). */
if (N <= 0) {
printf("N: %d should be > 0\n", N);
}
if (C <= 0) {
printf("C: %d should be > 0\n", C);
}
if (K <= 0) {
printf("K: %d should be > 0\n", K);
}
if (xt == 0 || csp == 0 || hp == 0 || w == 0 || r == 0 || (t > 1 && ht == 0) ||
cst == 0 || it == 0 || ft == 0 || ot == 0 || cit == 0 || cot == 0 ||
dxt == 0 || dcspt== 0|| dhpt== 0|| dw == 0 || dr == 0 || db == 0 || dht == 0 || dcs == 0) {
printf("None of the pointers should be NULL::\n");
printf("x:%p\n", xt);
printf("csp:%p\n", csp);
printf("h_prev:%p\n", hp);
printf("ht:%p\n", ht);
printf("w:%p\n", w);
printf("r:%p\n", r);
printf("cs:%p\n", cst);
printf("i:%p\n", it);
printf("f:%p\n", ft);
printf("o:%p\n", ot);
printf("ci:%p\n", cit);
printf("co:%p\n", cot);
printf("dcs:%p\n", dcs);
printf("dxt:%p\n", dxt);
printf("dcspt:%p\n", dcspt);
printf("dhpt:%p\n", dhpt);
printf("dw:%p\n", dw);
printf("dr:%p\n", dr);
printf("db:%p\n", db);
printf("dht:%p\n", dht);
}
/* setup LIBXSMM handle */
lstmcell_desc.threads = nThreads;
lstmcell_desc.N = N;
lstmcell_desc.C = C;
lstmcell_desc.K = K;
lstmcell_desc.max_T = t;
/* Pick the largest minibatch blocking factor that divides N evenly. */
lstmcell_desc.bn = 24;
if(N % 24 == 0) lstmcell_desc.bn = 24;
else if(N % 16 == 0) lstmcell_desc.bn = 16;
else if(N % 12 == 0) lstmcell_desc.bn = 12;
else if(N % 8 == 0) lstmcell_desc.bn = 8;
else if(N % 6 == 0) lstmcell_desc.bn = 6;
lstmcell_desc.bc = 64;
lstmcell_desc.bk = 64;
lstmcell_desc.cell_type = LIBXSMM_DNN_RNNCELL_LSTM;
lstmcell_desc.datatype_in = LIBXSMM_DNN_DATATYPE_F32;
lstmcell_desc.datatype_out = LIBXSMM_DNN_DATATYPE_F32;
lstmcell_desc.buffer_format = LIBXSMM_DNN_TENSOR_FORMAT_NC;
lstmcell_desc.filter_format = (w_in_kcck ? LIBXSMM_DNN_TENSOR_FORMAT_CKPACKED : LIBXSMM_DNN_TENSOR_FORMAT_CK);
libxsmm_handle = libxsmm_dnn_create_rnncell( lstmcell_desc, &status );
CHKERR_LIBXSMM_DNN( status );
/* setup LIBXSMM buffers and filter: for each tensor, query its layout,
 * link the caller's buffer, and destroy the (copied) layout descriptor. */
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_REGULAR_INPUT, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("Xt", libxsmm_layout);
libxsmm_input = libxsmm_dnn_link_tensor( libxsmm_layout, xt, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_REGULAR_CS_PREV, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("CSP", libxsmm_layout);
libxsmm_cs_prev = libxsmm_dnn_link_tensor( libxsmm_layout, csp, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE_PREV, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("HP", libxsmm_layout);
libxsmm_hidden_state_prev = libxsmm_dnn_link_tensor( libxsmm_layout, hp, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
/* The full hidden-state sequence is only needed for multi-timestep runs. */
if(t > 1) {
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("HT", libxsmm_layout);
libxsmm_hidden_state = libxsmm_dnn_link_tensor( libxsmm_layout, ht, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
}
/* Weights may be supplied pre-transposed; pick the matching tensor kind. */
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, w_in_trans ? LIBXSMM_DNN_RNN_REGULAR_WEIGHT_TRANS : LIBXSMM_DNN_RNN_REGULAR_WEIGHT, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("W", libxsmm_layout);
libxsmm_weight = libxsmm_dnn_link_tensor( libxsmm_layout, w, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, w_in_trans ? LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT_TRANS : LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("R", libxsmm_layout);
libxsmm_recur_weight = libxsmm_dnn_link_tensor( libxsmm_layout, r, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_REGULAR_CS, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("CSt", libxsmm_layout);
libxsmm_cs = libxsmm_dnn_link_tensor( libxsmm_layout, cst, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
/* Gate activations saved from the forward pass (i, f, o, ci, co). */
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_INTERNAL_I, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("It", libxsmm_layout);
libxsmm_i = libxsmm_dnn_link_tensor( libxsmm_layout, it, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_INTERNAL_F, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("Ft", libxsmm_layout);
libxsmm_f = libxsmm_dnn_link_tensor( libxsmm_layout, ft, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_INTERNAL_O, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("Ot", libxsmm_layout);
libxsmm_o = libxsmm_dnn_link_tensor( libxsmm_layout, ot, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_INTERNAL_CI, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("CIt", libxsmm_layout);
libxsmm_ci = libxsmm_dnn_link_tensor( libxsmm_layout, cit, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_INTERNAL_CO, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("COt", libxsmm_layout);
libxsmm_co = libxsmm_dnn_link_tensor( libxsmm_layout, cot, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
/* Gradient tensors (outputs of the backward/update pass). */
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_GRADIENT_INPUT, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("dXt", libxsmm_layout);
libxsmm_dinput = libxsmm_dnn_link_tensor( libxsmm_layout, dxt, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_GRADIENT_CS_PREV, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("dCSPt", libxsmm_layout);
libxsmm_dcs_prev = libxsmm_dnn_link_tensor( libxsmm_layout, dcspt, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE_PREV, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("dHPt", libxsmm_layout);
libxsmm_dhidden_state_prev = libxsmm_dnn_link_tensor( libxsmm_layout, dhpt, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_GRADIENT_WEIGHT, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("dW", libxsmm_layout);
libxsmm_dweight = libxsmm_dnn_link_tensor( libxsmm_layout, dw, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_GRADIENT_RECUR_WEIGHT, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("dR", libxsmm_layout);
libxsmm_drecur_weight = libxsmm_dnn_link_tensor( libxsmm_layout, dr, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_GRADIENT_BIAS, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("dB", libxsmm_layout);
libxsmm_dbias = libxsmm_dnn_link_tensor( libxsmm_layout, db, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_GRADIENT_CS, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("dCS", libxsmm_layout);
libxsmm_dcs = libxsmm_dnn_link_tensor( libxsmm_layout, dcs, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
libxsmm_layout = libxsmm_dnn_rnncell_create_tensor_datalayout( libxsmm_handle, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE, &status ); CHKERR_LIBXSMM_DNN( status );
PRINT_LAYOUT2("dHt", libxsmm_layout);
libxsmm_dhidden_state = libxsmm_dnn_link_tensor( libxsmm_layout, dht, &status ); CHKERR_LIBXSMM_DNN( status );
libxsmm_dnn_destroy_tensor_datalayout( libxsmm_layout );
/* bind buffers and filter to handle */
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_input, LIBXSMM_DNN_RNN_REGULAR_INPUT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_cs_prev, LIBXSMM_DNN_RNN_REGULAR_CS_PREV ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_hidden_state_prev, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE_PREV ) );
if(t > 1) {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_hidden_state, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE ) );
}
if(w_in_trans) {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_weight, LIBXSMM_DNN_RNN_REGULAR_WEIGHT_TRANS ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_recur_weight, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT_TRANS ) );
} else {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_weight, LIBXSMM_DNN_RNN_REGULAR_WEIGHT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_recur_weight, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT ) );
}
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_cs, LIBXSMM_DNN_RNN_REGULAR_CS ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_i, LIBXSMM_DNN_RNN_INTERNAL_I ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_f, LIBXSMM_DNN_RNN_INTERNAL_F ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_o, LIBXSMM_DNN_RNN_INTERNAL_O ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_ci, LIBXSMM_DNN_RNN_INTERNAL_CI ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_co, LIBXSMM_DNN_RNN_INTERNAL_CO ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_dinput, LIBXSMM_DNN_RNN_GRADIENT_INPUT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_dcs_prev, LIBXSMM_DNN_RNN_GRADIENT_CS_PREV ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_dhidden_state_prev, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE_PREV ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_dweight, LIBXSMM_DNN_RNN_GRADIENT_WEIGHT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_drecur_weight, LIBXSMM_DNN_RNN_GRADIENT_RECUR_WEIGHT ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_dbias, LIBXSMM_DNN_RNN_GRADIENT_BIAS ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_dcs, LIBXSMM_DNN_RNN_GRADIENT_CS ) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_tensor( libxsmm_handle, libxsmm_dhidden_state, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE ) );
/* Allocate and bind scratch for the BWD+UPD pass, zeroed before use.
 * 2 MiB alignment targets huge pages; size/4 converts bytes to floats. */
size_t scratch_size = libxsmm_dnn_rnncell_get_scratch_size( libxsmm_handle, LIBXSMM_DNN_COMPUTE_KIND_BWDUPD, &status );
CHKERR_LIBXSMM_DNN( status );
if (scratch_size > 0) {
void* scratch = libxsmm_aligned_malloc( scratch_size, 2097152 );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_bind_scratch( libxsmm_handle, LIBXSMM_DNN_COMPUTE_KIND_BWDUPD, scratch ) );
zero_buf( (float*)scratch, scratch_size/4 );
}
return (void*)libxsmm_handle;
}
/* Repoint every tensor of an existing backward-LSTM handle at new caller
 * buffers and set the sequence length to `t`. Use this to reuse one handle
 * (created by lstm_bwd_create with the same N/C/K/nThreads) across calls
 * with different data. The NULL checks only print a diagnostic and fall
 * through; `ht` alone may be NULL, in which case the hidden-state tensor
 * is left untouched. */
void lstm_bwd_set_ptr( void* libxsmm_handle_, int w_in_trans,
const int t,
const float *xt,
const float *csp,
const float *hp,
const float *ht,
const float *w,
const float *r,
const float *cst,
const float *it,
const float *ft,
const float *ot,
const float *cit,
const float *cot,
const float *dcs,
const float *dht,
float *dxt,
float *dcspt,
float *dhpt,
float *dw,
float *dr,
float *db )
{
libxsmm_dnn_err_t status = LIBXSMM_DNN_SUCCESS;
libxsmm_dnn_rnncell* handle = (libxsmm_dnn_rnncell*) libxsmm_handle_;
if (xt == 0 || csp == 0 || hp == 0 || w == 0 || r == 0 ||
cst == 0 || it == 0 || ft == 0 || ot == 0 || cit == 0 || cot == 0 ||
dxt == 0 || dcspt== 0|| dhpt== 0|| dw == 0 || dr == 0 || db == 0 || dht == 0 || dcs == 0) {
printf("None of the pointers should be NULL::\n");
printf("x:%p\n", xt);
printf("cst:%p\n", csp);
printf("h_prev:%p\n", hp);
printf("ht:%p\n", ht);
printf("w:%p\n", w);
printf("r:%p\n", r);
printf("cs:%p\n", cst);
printf("i:%p\n", it);
printf("f:%p\n", ft);
printf("o:%p\n", ot);
printf("ci:%p\n", cit);
printf("co:%p\n", cot);
printf("dcs:%p\n", dcs);
printf("dxt:%p\n", dxt);
printf("dcspt:%p\n", dcspt);
printf("dhpt:%p\n", dhpt);
printf("dw:%p\n", dw);
printf("dr:%p\n", dr);
printf("db:%p\n", db);
printf("dht:%p\n", dht);
}
/* bind buffers and filter to handle */
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_set_sequence_length( handle, t) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_INPUT, &status), xt) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_CS_PREV, &status), csp) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE_PREV, &status), hp) );
if(ht != 0) { CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE, &status), ht) ); }
/* Weight tensor kind must match how the handle was created (transposed
 * or not) — pass the same w_in_trans used at creation. */
if(w_in_trans) {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_WEIGHT_TRANS, &status), w) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT_TRANS, &status), r) );
} else {
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_WEIGHT, &status), w) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT, &status), r) );
}
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_CS, &status), cst) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_I, &status), it) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_F, &status), ft) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_O, &status), ot) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_CI, &status), cit) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_CO, &status), cot) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_INPUT, &status), dxt) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_CS_PREV, &status), dcspt) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE_PREV, &status), dhpt) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_WEIGHT, &status), dw) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_RECUR_WEIGHT, &status), dr) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_BIAS, &status), db) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_CS, &status), dcs) );
CHKERR_LIBXSMM_DNN( libxsmm_dnn_set_tensor_data_ptr( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE, &status), dht) );
}
/* Run the LSTM backward+update pass across all OpenMP threads: each thread
 * calls the single-threaded entry point with its own thread id. In a build
 * without OpenMP this function aborts, since the handle was configured for
 * nThreads workers. */
void lstm_bwd_execute_omp( void* libxsmm_handle_ )
{
#ifdef _OPENMP
libxsmm_dnn_rnncell* handle = (libxsmm_dnn_rnncell*) libxsmm_handle_;
/* run LIBXSMM LSTM BWD */
#pragma omp parallel
{
int tid = omp_get_thread_num();
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_execute_st( handle, LIBXSMM_DNN_COMPUTE_KIND_BWDUPD, 0, tid ) );
}
#else
printf("%s:%d Shouldn't come here... exiting\n", __FILE__, __LINE__);
exit(1);
#endif
}
/* Run this thread's share of the LSTM backward+update pass; `tid` must be
 * in [0, nThreads) as configured at handle creation. Intended for callers
 * that manage their own thread team. */
void lstm_bwd_execute_st( void* libxsmm_handle_, int tid )
{
libxsmm_dnn_rnncell* handle = (libxsmm_dnn_rnncell*) libxsmm_handle_;
/* run LIBXSMM LSTM BWD */
CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_execute_st( handle, LIBXSMM_DNN_COMPUTE_KIND_BWDUPD, 0, tid ) );
}
/* Tear down everything that was set up for the LSTM backward/update pass:
   destroy all bound tensors, release them from the cell, free the scratch
   buffer, and finally destroy the RNN-cell handle itself. */
void lstm_bwd_destroy( void* libxsmm_handle_ )
{
  libxsmm_dnn_rnncell* handle = (libxsmm_dnn_rnncell*) libxsmm_handle_;
  libxsmm_dnn_err_t status = LIBXSMM_DNN_SUCCESS;
  /* Destroy the always-present forward-pass input/state tensors. */
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_INPUT, &status) ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_CS_PREV, &status) ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE_PREV, &status) ) );
  /* These tensors may or may not be bound; destroy+release only if present. */
  if(libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE, &status)) {
    CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE, &status) ) );
    CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE ) );
  }
  if(libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_WEIGHT, &status)) {
    CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_WEIGHT, &status) ) );
    CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_WEIGHT ) );
  }
  if(libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT, &status)) {
    CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT, &status) ) );
    CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT ) );
  }
  if(libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_WEIGHT_TRANS, &status)) {
    CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_WEIGHT_TRANS, &status) ) );
    CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_WEIGHT_TRANS ) );
  }
  if(libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT_TRANS, &status)) {
    CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT_TRANS, &status) ) );
    CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_RECUR_WEIGHT_TRANS ) );
  }
  /* Remaining state, internal, and gradient tensors. */
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_REGULAR_CS, &status) ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_I, &status) ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_F, &status) ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_O, &status) ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_CI, &status) ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_INTERNAL_CO, &status) ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_INPUT, &status) ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_CS_PREV, &status) ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE_PREV, &status) ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_WEIGHT, &status) ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_RECUR_WEIGHT, &status) ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_BIAS, &status) ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_CS, &status) ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_tensor( libxsmm_dnn_rnncell_get_tensor(handle, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE, &status) ) );
  /* Detach all destroyed tensors from the cell handle. */
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_INPUT ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_CS_PREV ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_HIDDEN_STATE_PREV ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_REGULAR_CS ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_INTERNAL_I ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_INTERNAL_F ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_INTERNAL_O ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_INTERNAL_CI ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_INTERNAL_CO ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_GRADIENT_INPUT ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_GRADIENT_CS_PREV ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE_PREV ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_GRADIENT_WEIGHT ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_GRADIENT_RECUR_WEIGHT ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_GRADIENT_BIAS ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_GRADIENT_CS ) );
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_tensor( handle, LIBXSMM_DNN_RNN_GRADIENT_HIDDEN_STATE ) );
  /* Release and free the BWD/UPD scratch buffer, if one was allocated. */
  size_t scratch_size = libxsmm_dnn_rnncell_get_scratch_size( handle, LIBXSMM_DNN_COMPUTE_KIND_BWDUPD, &status );
  if (scratch_size > 0) {
    void *scratch = libxsmm_dnn_rnncell_get_scratch_ptr( handle, &status );
    CHKERR_LIBXSMM_DNN( libxsmm_dnn_rnncell_release_scratch( handle, LIBXSMM_DNN_COMPUTE_KIND_BWDUPD ) );
    if(scratch) libxsmm_free(scratch);
  }
  CHKERR_LIBXSMM_DNN( libxsmm_dnn_destroy_rnncell( handle ) );
}
|
gpssim.c | #define _CRT_SECURE_NO_DEPRECATE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <omp.h>
#ifdef _WIN32
#include "getopt.h"
#else
#include <unistd.h>
#endif
#ifndef bool
typedef int bool;
#define true 1
#define false 0
#endif
#define MAX_CHAR (100)
#define MAX_SAT (32)
#define MAX_CHAN (16)
#define USER_MOTION_SIZE (3000) // max 300 sec at 10Hz
#define N_SBF (51) // 6 seconds per subframe, 6 sec * 51 = 306 sec (max)
#define N_DWRD (N_SBF*10) // 10 word per subframe
#define SECONDS_IN_WEEK 604800.0
#define SECONDS_IN_HALF_WEEK 302400.0
#define SECONDS_IN_DAY 86400.0
#define SECONDS_IN_HOUR 3600.0
#define SECONDS_IN_MINUTE 60.0
#define POW2_M5 0.03125
#define POW2_M19 1.907348632812500e-6
#define POW2_M29 1.862645149230957e-9
#define POW2_M31 4.656612873077393e-10
#define POW2_M33 1.164153218269348e-10
#define POW2_M43 1.136868377216160e-13
#define POW2_M55 2.775557561562891e-17
// Conventional values employed in GPS ephemeris model (ICD-GPS-200)
#define GM_EARTH 3.986005e14
#define OMEGA_EARTH 7.2921151467e-5
#define PI 3.1415926535898
#define R2D 57.2957795131
#define SPEED_OF_LIGHT 2.99792458e8
#define LAMBDA_L1 0.190293672798365
#define CARR_FREQ (1575.42e6)
#define CODE_FREQ (1.023e6)
#define CARR_TO_CODE (1.0/1540.0)
// Sampling data format
#define SC08 (8)
#define SC16 (16)
#define ADC_GAIN (250) // for bladeRF txvga1 = -25dB with 50dB external attenuation
#define _SINE_LUT
#ifdef _SINE_LUT
// 512-entry sine lookup table, one full cycle, amplitude +/-250
// (avoids calling sin() per output sample when _SINE_LUT is defined).
int sinTable512[] = {
 2, 5, 8, 11, 14, 17, 20, 23, 26, 29, 32, 35, 38, 41, 44, 47,
 50, 53, 56, 59, 62, 65, 68, 71, 74, 77, 80, 83, 86, 89, 91, 94,
 97, 100, 103, 105, 108, 111, 114, 116, 119, 122, 125, 127, 130, 132, 135, 138,
 140, 143, 145, 148, 150, 153, 155, 157, 160, 162, 164, 167, 169, 171, 173, 176,
 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 205, 207,
 209, 210, 212, 214, 215, 217, 218, 220, 221, 223, 224, 225, 227, 228, 229, 230,
 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 241, 242, 243, 244, 244, 245,
 245, 246, 247, 247, 248, 248, 248, 249, 249, 249, 249, 250, 250, 250, 250, 250,
 250, 250, 250, 250, 250, 249, 249, 249, 249, 248, 248, 248, 247, 247, 246, 245,
 245, 244, 244, 243, 242, 241, 241, 240, 239, 238, 237, 236, 235, 234, 233, 232,
 230, 229, 228, 227, 225, 224, 223, 221, 220, 218, 217, 215, 214, 212, 210, 209,
 207, 205, 204, 202, 200, 198, 196, 194, 192, 190, 188, 186, 184, 182, 180, 178,
 176, 173, 171, 169, 167, 164, 162, 160, 157, 155, 153, 150, 148, 145, 143, 140,
 138, 135, 132, 130, 127, 125, 122, 119, 116, 114, 111, 108, 105, 103, 100, 97,
 94, 91, 89, 86, 83, 80, 77, 74, 71, 68, 65, 62, 59, 56, 53, 50,
 47, 44, 41, 38, 35, 32, 29, 26, 23, 20, 17, 14, 11, 8, 5, 2,
 -2, -5, -8, -11, -14, -17, -20, -23, -26, -29, -32, -35, -38, -41, -44, -47,
 -50, -53, -56, -59, -62, -65, -68, -71, -74, -77, -80, -83, -86, -89, -91, -94,
 -97,-100,-103,-105,-108,-111,-114,-116,-119,-122,-125,-127,-130,-132,-135,-138,
 -140,-143,-145,-148,-150,-153,-155,-157,-160,-162,-164,-167,-169,-171,-173,-176,
 -178,-180,-182,-184,-186,-188,-190,-192,-194,-196,-198,-200,-202,-204,-205,-207,
 -209,-210,-212,-214,-215,-217,-218,-220,-221,-223,-224,-225,-227,-228,-229,-230,
 -232,-233,-234,-235,-236,-237,-238,-239,-240,-241,-241,-242,-243,-244,-244,-245,
 -245,-246,-247,-247,-248,-248,-248,-249,-249,-249,-249,-250,-250,-250,-250,-250,
 -250,-250,-250,-250,-250,-249,-249,-249,-249,-248,-248,-248,-247,-247,-246,-245,
 -245,-244,-244,-243,-242,-241,-241,-240,-239,-238,-237,-236,-235,-234,-233,-232,
 -230,-229,-228,-227,-225,-224,-223,-221,-220,-218,-217,-215,-214,-212,-210,-209,
 -207,-205,-204,-202,-200,-198,-196,-194,-192,-190,-188,-186,-184,-182,-180,-178,
 -176,-173,-171,-169,-167,-164,-162,-160,-157,-155,-153,-150,-148,-145,-143,-140,
 -138,-135,-132,-130,-127,-125,-122,-119,-116,-114,-111,-108,-105,-103,-100, -97,
 -94, -91, -89, -86, -83, -80, -77, -74, -71, -68, -65, -62, -59, -56, -53, -50,
 -47, -44, -41, -38, -35, -32, -29, -26, -23, -20, -17, -14, -11, -8, -5, -2
};
// 512-entry cosine lookup table (the sine table shifted by a quarter
// cycle), one full cycle, amplitude +/-250.
int cosTable512[] = {
 250, 250, 250, 250, 250, 249, 249, 249, 249, 248, 248, 248, 247, 247, 246, 245,
 245, 244, 244, 243, 242, 241, 241, 240, 239, 238, 237, 236, 235, 234, 233, 232,
 230, 229, 228, 227, 225, 224, 223, 221, 220, 218, 217, 215, 214, 212, 210, 209,
 207, 205, 204, 202, 200, 198, 196, 194, 192, 190, 188, 186, 184, 182, 180, 178,
 176, 173, 171, 169, 167, 164, 162, 160, 157, 155, 153, 150, 148, 145, 143, 140,
 138, 135, 132, 130, 127, 125, 122, 119, 116, 114, 111, 108, 105, 103, 100, 97,
 94, 91, 89, 86, 83, 80, 77, 74, 71, 68, 65, 62, 59, 56, 53, 50,
 47, 44, 41, 38, 35, 32, 29, 26, 23, 20, 17, 14, 11, 8, 5, 2,
 -2, -5, -8, -11, -14, -17, -20, -23, -26, -29, -32, -35, -38, -41, -44, -47,
 -50, -53, -56, -59, -62, -65, -68, -71, -74, -77, -80, -83, -86, -89, -91, -94,
 -97,-100,-103,-105,-108,-111,-114,-116,-119,-122,-125,-127,-130,-132,-135,-138,
 -140,-143,-145,-148,-150,-153,-155,-157,-160,-162,-164,-167,-169,-171,-173,-176,
 -178,-180,-182,-184,-186,-188,-190,-192,-194,-196,-198,-200,-202,-204,-205,-207,
 -209,-210,-212,-214,-215,-217,-218,-220,-221,-223,-224,-225,-227,-228,-229,-230,
 -232,-233,-234,-235,-236,-237,-238,-239,-240,-241,-241,-242,-243,-244,-244,-245,
 -245,-246,-247,-247,-248,-248,-248,-249,-249,-249,-249,-250,-250,-250,-250,-250,
 -250,-250,-250,-250,-250,-249,-249,-249,-249,-248,-248,-248,-247,-247,-246,-245,
 -245,-244,-244,-243,-242,-241,-241,-240,-239,-238,-237,-236,-235,-234,-233,-232,
 -230,-229,-228,-227,-225,-224,-223,-221,-220,-218,-217,-215,-214,-212,-210,-209,
 -207,-205,-204,-202,-200,-198,-196,-194,-192,-190,-188,-186,-184,-182,-180,-178,
 -176,-173,-171,-169,-167,-164,-162,-160,-157,-155,-153,-150,-148,-145,-143,-140,
 -138,-135,-132,-130,-127,-125,-122,-119,-116,-114,-111,-108,-105,-103,-100, -97,
 -94, -91, -89, -86, -83, -80, -77, -74, -71, -68, -65, -62, -59, -56, -53, -50,
 -47, -44, -41, -38, -35, -32, -29, -26, -23, -20, -17, -14, -11, -8, -5, -2,
 2, 5, 8, 11, 14, 17, 20, 23, 26, 29, 32, 35, 38, 41, 44, 47,
 50, 53, 56, 59, 62, 65, 68, 71, 74, 77, 80, 83, 86, 89, 91, 94,
 97, 100, 103, 105, 108, 111, 114, 116, 119, 122, 125, 127, 130, 132, 135, 138,
 140, 143, 145, 148, 150, 153, 155, 157, 160, 162, 164, 167, 169, 171, 173, 176,
 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 205, 207,
 209, 210, 212, 214, 215, 217, 218, 220, 221, 223, 224, 225, 227, 228, 229, 230,
 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 241, 242, 243, 244, 244, 245,
 245, 246, 247, 247, 248, 248, 248, 249, 249, 249, 249, 250, 250, 250, 250, 250
};
#endif
// GPS time: week number plus seconds into the week.
typedef struct
{
	int week;    // GPS week number (weeks since Jan 6, 1980)
	double sec;  // seconds into the week [0, 604800)
} gpstime_t;

// Calendar date and time of day.
typedef struct
{
	int y;       // year (four digits)
	int m;       // month (1-12)
	int d;       // day of month
	int hh;      // hour (0-23)
	int mm;      // minute (0-59)
	double sec;  // seconds (may be fractional)
} datetime_t;

// Broadcast ephemeris for one satellite, as parsed from a RINEX nav file,
// plus precomputed working variables used by satpos().
typedef struct
{
	int vflg;        // non-zero once this slot holds a valid record
	gpstime_t toc;   // time of clock (clock-correction reference epoch)
	gpstime_t toe;   // time of ephemeris (orbit reference epoch)
	int iodc;        // issue of data, clock
	int iode;        // issue of data, ephemeris
	double deltan;   // mean motion difference [rad/s]
	double cuc;      // harmonic correction: cos, argument of latitude [rad]
	double cus;      // harmonic correction: sin, argument of latitude [rad]
	double cic;      // harmonic correction: cos, inclination [rad]
	double cis;      // harmonic correction: sin, inclination [rad]
	double crc;      // harmonic correction: cos, radius [m]
	double crs;      // harmonic correction: sin, radius [m]
	double ecc;      // eccentricity
	double sqrta;    // square root of semi-major axis [m^0.5]
	double m0;       // mean anomaly at reference time [rad]
	double omg0;     // longitude of ascending node [rad]
	double inc0;     // inclination at reference time [rad]
	double aop;      // argument of perigee [rad]
	double omgdot;   // rate of right ascension [rad/s]
	double idot;     // rate of inclination [rad/s]
	double af0;      // SV clock bias [s]
	double af1;      // SV clock drift [s/s]
	double af2;      // SV clock drift rate [s/s^2]
	double tgd;      // group delay differential [s]
	// Working variables follow
	double n; // Mean motion (Average angular velocity)
	double sq1e2; // sqrt(1-e^2)
	double A; // Semi-major axis
	double omgkdot; // OmegaDot-OmegaEdot
} ephem_t;

// One pseudorange observation at a given GPS time.
typedef struct
{
	gpstime_t g;   // time of the observation
	double range;  // pseudorange [m]
	double rate;   // range rate [m/s]
} range_t;

// Per-satellite signal-generation channel state.
typedef struct
{
	int prn;                       // PRN number (1-32)
	int ca[1023];                  // C/A code chips as 0/1
	double f_carr,f_code;          // carrier Doppler and code frequency [Hz]
	double carr_phase,code_phase;  // phase accumulators; code_phase is in chips
	gpstime_t g0;                  // scenario reference time (nav-message start) -- TODO confirm against caller
	unsigned long dwrd[N_DWRD];    // navigation message as 30-bit words
	int iword;                     // current word index into dwrd
	int ibit;                      // current bit within the word (30 bits/word)
	int icode;                     // current code period within the bit (20 codes/bit)
	int dataBit;                   // current nav data bit as +/-1
	int codeCA;                    // current C/A chip as +/-1
	short *iq_buff;                // per-channel I/Q sample buffer (owner unclear from here)
} channel_t;
/* Generate the 1023-chip C/A (Gold) code for the given PRN into ca[],
 * chips encoded as 0/1.  Invalid PRNs (outside 1..32) leave ca untouched.
 */
void codegen(int *ca, int prn)
{
	/* G2 output delay in chips for PRN 1..32 (ICD-GPS-200). */
	static const int g2_delay[] = {
		  5,   6,   7,   8,  17,  18, 139, 140, 141, 251,
		252, 254, 255, 256, 257, 258, 469, 470, 471, 472,
		473, 474, 509, 512, 513, 514, 515, 516, 859, 860,
		861, 862};

	int g1[1023], g2[1023];   /* one full period of each LFSR output */
	int reg1[10], reg2[10];   /* shift registers; chips stored as +/-1 */
	int chip, tap;

	if (prn < 1 || prn > 32)
		return;

	/* Both registers start as all ones (-1 in the +/-1 encoding). */
	for (tap = 0; tap < 10; tap++)
	{
		reg1[tap] = -1;
		reg2[tap] = -1;
	}

	/* Clock out one full 1023-chip period of G1 and G2.  Multiplication
	   of +/-1 chips implements XOR of the feedback taps:
	   G1 taps 3,10; G2 taps 2,3,6,8,9,10. */
	for (chip = 0; chip < 1023; chip++)
	{
		int fb1, fb2;

		g1[chip] = reg1[9];
		g2[chip] = reg2[9];

		fb1 = reg1[2]*reg1[9];
		fb2 = reg2[1]*reg2[2]*reg2[5]*reg2[7]*reg2[8]*reg2[9];

		for (tap = 9; tap > 0; tap--)
		{
			reg1[tap] = reg1[tap-1];
			reg2[tap] = reg2[tap-1];
		}
		reg1[0] = fb1;
		reg2[0] = fb2;
	}

	/* C/A code = G1 XOR (G2 delayed by the PRN-specific offset),
	   mapped from +/-1 back to 0/1. */
	for (chip = 0; chip < 1023; chip++)
	{
		int d = (chip + 1023 - g2_delay[prn-1]) % 1023;
		ca[chip] = (1 - g1[chip]*g2[d]) / 2;
	}
}
/* Convert a calendar date/time to GPS week and seconds-of-week.
 * Valid for dates from the GPS epoch (Jan 6, 1980); the simple ye/4 leap
 * rule ignores the century exception, so it holds through 2099. */
void date2gps(datetime_t *t, gpstime_t *g)
{
	/* Cumulative days at the start of each month (non-leap year). */
	static const int month_start[12] = {0,31,59,90,120,151,181,212,243,273,304,334};
	int years, days, leap_days;

	years = t->y - 1980;

	/* Leap days accumulated since 1980; Feb 29 of the current year only
	   counts once March has been reached. */
	leap_days = years/4 + 1;
	if ((years%4) == 0 && t->m <= 2)
		leap_days--;

	/* Whole days elapsed since Jan 6, 1980. */
	days = years*365 + month_start[t->m-1] + t->d + leap_days - 6;

	g->week = days/7;
	g->sec = (double)(days%7)*SECONDS_IN_DAY
	       + t->hh*SECONDS_IN_HOUR
	       + t->mm*SECONDS_IN_MINUTE
	       + t->sec;
}
void xyz2llh(double *xyz, double *llh)
{
double a,eps,e,e2;
double x,y,z;
double rho2,dz,zdz,nh,slat,n,dz_new;
a = 6378137.0;
e = 0.0818191908426;
eps = 1.0e-3;
e2 = e*e;
x = xyz[0];
y = xyz[1];
z = xyz[2];
rho2 = x*x + y*y;
dz = e2*z;
while (1)
{
zdz = z + dz;
nh = sqrt(rho2 + zdz*zdz);
slat = zdz / nh;
n = a / sqrt(1.0-e2*slat*slat);
dz_new = n*e2*slat;
if (fabs(dz-dz_new) < eps)
break;
dz = dz_new;
}
llh[0] = atan2(zdz, sqrt(rho2));
llh[1] = atan2(y, x);
llh[2] = nh - n;
return;
}
void llh2xyz(double *llh, double *xyz)
{
double n;
double a;
double e;
double e2;
double clat;
double slat;
double clon;
double slon;
double d,nph;
double tmp;
a = 6378137.0;
e = 0.0818191908426;
e2 = e*e;
clat = cos(llh[0]);
slat = sin(llh[0]);
clon = cos(llh[1]);
slon = sin(llh[1]);
d = e*slat;
n = a/sqrt(1.0-d*d);
nph = n + llh[2];
tmp = nph*clat;
xyz[0] = tmp*clon;
xyz[1] = tmp*slon;
xyz[2] = ((1.0-e2)*n + llh[2])*slat;
return;
}
void ltcmat(double *llh, double t[3][3])
{
double slat, clat;
double slon, clon;
slat = sin(llh[0]);
clat = cos(llh[0]);
slon = sin(llh[1]);
clon = cos(llh[1]);
t[0][0] = -slat*clon;
t[0][1] = -slat*slon;
t[0][2] = clat;
t[1][0] = -slon;
t[1][1] = clon;
t[1][2] = 0.0;
t[2][0] = clat*clon;
t[2][1] = clat*slon;
t[2][2] = slat;
return;
}
/* Rotate an ECEF vector into north/east/up components: neu = t * xyz. */
void ecef2neu(double *xyz, double t[3][3], double *neu)
{
	int row;

	for (row = 0; row < 3; row++)
		neu[row] = t[row][0]*xyz[0] + t[row][1]*xyz[1] + t[row][2]*xyz[2];
}
/* Convert a north/east/up vector to azimuth (clockwise from north,
 * normalized to [0, 2*pi)) and elevation above the horizontal, both
 * in radians. */
void neu2azel(double *azel, double *neu)
{
	double horiz;

	azel[0] = atan2(neu[1], neu[0]);
	if (azel[0] < 0.0)
		azel[0] += (2.0*PI);

	horiz = sqrt(neu[0]*neu[0] + neu[1]*neu[1]);
	azel[1] = atan2(neu[2], horiz);
}
// Compute ECEF satellite position [m], velocity [m/s], and clock terms
// from broadcast ephemeris at GPS time g, following the ICD-GPS-200 orbit
// model.  clk[0] = clock bias [s] (incl. relativistic term and TGD),
// clk[1] = clock drift [s/s].
void satpos(ephem_t eph, gpstime_t g, double *pos, double *vel, double *clk)
{
	// Computing Satellite Velocity using the Broadcast Ephemeris
	// http://www.ngs.noaa.gov/gps-toolbox/bc_velo.htm
	double tk;
	double mk;
	double ek;
	double ekold;
	double ekdot;
	double cek,sek;
	double pk;
	double pkdot;
	double c2pk,s2pk;
	double uk;
	double ukdot;
	double cuk,suk;
	double ok;
	double sok,cok;
	double ik;
	double ikdot;
	double sik,cik;
	double rk;
	double rkdot;
	double xpk,ypk;
	double xpkdot,ypkdot;
	double relativistic, OneMinusecosE, tmp;

	// Time from ephemeris reference epoch, wrapped into +/- half a week.
	tk = g.sec - eph.toe.sec;
	if(tk>SECONDS_IN_HALF_WEEK)
		tk -= SECONDS_IN_WEEK;
	else if(tk<-SECONDS_IN_HALF_WEEK)
		tk += SECONDS_IN_WEEK;

	// Mean anomaly.
	mk = eph.m0 + eph.n*tk;

	// Solve Kepler's equation for the eccentric anomaly by Newton
	// iteration (the loop always runs at least once, so OneMinusecosE
	// is initialized before its later uses).
	ek = mk;
	ekold = ek + 1.0;
	while(fabs(ek-ekold)>1.0E-14)
	{
		ekold = ek;
		OneMinusecosE = 1.0-eph.ecc*cos(ekold);
		ek = ek + (mk-ekold+eph.ecc*sin(ekold))/OneMinusecosE;
	}
	sek = sin(ek);
	cek = cos(ek);
	ekdot = eph.n/OneMinusecosE;

	// Relativistic clock correction: F*e*sqrt(A)*sin(Ek).
	relativistic = -4.442807633E-10*eph.ecc*eph.sqrta*sek;

	// True anomaly plus argument of perigee, and its rate.
	pk = atan2(eph.sq1e2*sek,cek-eph.ecc) + eph.aop;
	pkdot = eph.sq1e2*ekdot/OneMinusecosE;
	s2pk = sin(2.0*pk);
	c2pk = cos(2.0*pk);

	// Argument of latitude, radius, and inclination with the second-order
	// harmonic corrections applied, plus their rates.
	uk = pk + eph.cus*s2pk + eph.cuc*c2pk;
	suk = sin(uk);
	cuk = cos(uk);
	ukdot = pkdot*(1.0 + 2.0*(eph.cus*c2pk - eph.cuc*s2pk));
	rk = eph.A*OneMinusecosE + eph.crc*c2pk + eph.crs*s2pk;
	rkdot = eph.A*eph.ecc*sek*ekdot + 2.0*pkdot*(eph.crs*c2pk - eph.crc*s2pk);
	ik = eph.inc0 + eph.idot*tk + eph.cic*c2pk + eph.cis*s2pk;
	sik = sin(ik);
	cik = cos(ik);
	ikdot = eph.idot + 2.0*pkdot*(eph.cis*c2pk - eph.cic*s2pk);

	// Position and velocity in the orbital plane.
	xpk = rk*cuk;
	ypk = rk*suk;
	xpkdot = rkdot*cuk - ypk*ukdot;
	ypkdot = rkdot*suk + xpk*ukdot;

	// Corrected longitude of the ascending node (includes Earth rotation).
	ok = eph.omg0 + tk*eph.omgkdot - OMEGA_EARTH*eph.toe.sec;
	sok = sin(ok);
	cok = cos(ok);

	// Rotate into ECEF.
	pos[0] = xpk*cok - ypk*cik*sok;
	pos[1] = xpk*sok + ypk*cik*cok;
	pos[2] = ypk*sik;
	tmp = ypkdot*cik - ypk*sik*ikdot;
	vel[0] = -eph.omgkdot*pos[1] + xpkdot*cok - tmp*sok;
	vel[1] = eph.omgkdot*pos[0] + xpkdot*sok + tmp*cok;
	vel[2] = ypk*cik*ikdot + ypkdot*sik;

	// Satellite clock correction
	tk = g.sec - eph.toc.sec;
	if(tk>SECONDS_IN_HALF_WEEK)
		tk -= SECONDS_IN_WEEK;
	else if(tk<-SECONDS_IN_HALF_WEEK)
		tk += SECONDS_IN_WEEK;
	clk[0] = eph.af0 + tk*(eph.af1 + tk*eph.af2) + relativistic - eph.tgd;
	clk[1] = eph.af1 + 2.0*tk*eph.af2;
	return;
}
// Pack the broadcast ephemeris into the five 10-word navigation subframes.
// Each entry holds 24 data bits left-justified into bits 29..6; the six
// parity bits are filled in later by computeChecksum().  Scale factors
// (POW2_M*) are the broadcast LSB weights from ICD-GPS-200.
void eph2sbf(ephem_t eph, unsigned long sbf[5][10])
{
	unsigned long wn;
	unsigned long toe;
	unsigned long toc;
	unsigned long iode;
	unsigned long iodc;
	long deltan;
	long cuc;
	long cus;
	long cic;
	long cis;
	long crc;
	long crs;
	unsigned long ecc;
	unsigned long sqrta;
	long m0;
	long omg0;
	long inc0;
	long aop;
	long omgdot;
	long idot;
	long af0;
	long af1;
	long af2;
	long tgd;
	unsigned long ura = 2UL;              // fixed user-range-accuracy index
	unsigned long dataId = 1UL;
	unsigned long sbf4_page25_svId = 63UL; // SV ID marking subframe 4, page 25
	unsigned long sbf5_page25_svId = 51UL; // SV ID marking subframe 5, page 25
	unsigned long wna;
	unsigned long toa;

	// Quantize each parameter to its broadcast LSB (angles are scaled by
	// 1/PI because broadcast units are semicircles).
	wn = (unsigned long)(eph.toe.week%1024);
	toe = (unsigned long)(eph.toe.sec/16.0);
	toc = (unsigned long)(eph.toc.sec/16.0);
	iode = (unsigned long)(eph.iode);
	iodc = (unsigned long)(eph.iodc);
	deltan = (long)(eph.deltan/POW2_M43/PI);
	cuc = (long)(eph.cuc/POW2_M29);
	cus = (long)(eph.cus/POW2_M29);
	cic = (long)(eph.cic/POW2_M29);
	cis = (long)(eph.cis/POW2_M29);
	crc = (long)(eph.crc/POW2_M5);
	crs = (long)(eph.crs/POW2_M5);
	ecc = (unsigned long)(eph.ecc/POW2_M33);
	sqrta = (unsigned long)(eph.sqrta/POW2_M19);
	m0 = (long)(eph.m0/POW2_M31/PI);
	omg0 = (long)(eph.omg0/POW2_M31/PI);
	inc0 = (long)(eph.inc0/POW2_M31/PI);
	aop = (long)(eph.aop/POW2_M31/PI);
	omgdot = (long)(eph.omgdot/POW2_M43/PI);
	idot = (long)(eph.idot/POW2_M43/PI);
	af0 = (long)(eph.af0/POW2_M31);
	af1 = (long)(eph.af1/POW2_M43);
	af2 = (long)(eph.af2/POW2_M55);
	tgd = (long)(eph.tgd/POW2_M31);
	wna = (unsigned long)(eph.toe.week%256);
	toa = (unsigned long)(eph.toe.sec/4096.0);

	// Word 0 of each subframe is the TLM word (preamble 0x8B); word 1 is
	// the HOW carrying the subframe ID.

	// Subframe 1
	sbf[0][0] = 0x8B0000UL<<6;
	sbf[0][1] = 0x1UL<<8;
	sbf[0][2] = ((wn&0x3FFUL)<<20) | (ura<<14) | (((iodc>>8)&0x3UL)<<6);
	sbf[0][3] = 0UL;
	sbf[0][4] = 0UL;
	sbf[0][5] = 0UL;
	sbf[0][6] = (tgd&0xFFUL)<<6;
	sbf[0][7] = ((iodc&0xFFUL)<<22) | ((toc&0xFFFFUL)<<6);
	sbf[0][8] = ((af2&0xFFUL)<<22) | ((af1&0xFFFFUL)<<6);
	sbf[0][9] = (af0&0x3FFFFFUL)<<8;

	// Subframe 2
	sbf[1][0] = 0x8B0000UL<<6;
	sbf[1][1] = 0x2UL<<8;
	sbf[1][2] = ((iode&0xFFUL)<<22) | ((crs&0xFFFFUL)<<6);
	sbf[1][3] = ((deltan&0xFFFFUL)<<14) | (((m0>>24)&0xFFUL)<<6);
	sbf[1][4] = (m0&0xFFFFFFUL)<<6;
	sbf[1][5] = ((cuc&0xFFFFUL)<<14) | (((ecc>>24)&0xFFUL)<<6);
	sbf[1][6] = (ecc&0xFFFFFFUL)<<6;
	sbf[1][7] = ((cus&0xFFFFUL)<<14) | (((sqrta>>24)&0xFFUL)<<6);
	sbf[1][8] = (sqrta&0xFFFFFFUL)<<6;
	sbf[1][9] = (toe&0xFFFFUL)<<14;

	// Subframe 3
	sbf[2][0] = 0x8B0000UL<<6;
	sbf[2][1] = 0x3UL<<8;
	sbf[2][2] = ((cic&0xFFFFUL)<<14) | (((omg0>>24)&0xFFUL)<<6);
	sbf[2][3] = (omg0&0xFFFFFFUL)<<6;
	sbf[2][4] = ((cis&0xFFFFUL)<<14) | (((inc0>>24)&0xFFUL)<<6);
	sbf[2][5] = (inc0&0xFFFFFFUL)<<6;
	sbf[2][6] = ((crc&0xFFFFUL)<<14) | (((aop>>24)&0xFFUL)<<6);
	sbf[2][7] = (aop&0xFFFFFFUL)<<6;
	sbf[2][8] = (omgdot&0xFFFFFFUL)<<6;
	sbf[2][9] = ((iode&0xFFUL)<<22) | ((idot&0x3FFFUL)<<8);

	// Subframe 4, page 25 (almanac-related content left empty)
	sbf[3][0] = 0x8B0000UL<<6;
	sbf[3][1] = 0x4UL<<8;
	sbf[3][2] = (dataId<<28) | (sbf4_page25_svId<<22);
	sbf[3][3] = 0UL;
	sbf[3][4] = 0UL;
	sbf[3][5] = 0UL;
	sbf[3][6] = 0UL;
	sbf[3][7] = 0UL;
	sbf[3][8] = 0UL;
	sbf[3][9] = 0UL;

	// Subframe 5, page 25 (carries almanac reference time toa/wna)
	sbf[4][0] = 0x8B0000UL<<6;
	sbf[4][1] = 0x5UL<<8;
	sbf[4][2] = (dataId<<28) | (sbf5_page25_svId<<22) | ((toa&0xFFUL)<<14) | ((wna&0xFFUL)<<6);
	sbf[4][3] = 0UL;
	sbf[4][4] = 0UL;
	sbf[4][5] = 0UL;
	sbf[4][6] = 0UL;
	sbf[4][7] = 0UL;
	sbf[4][8] = 0UL;
	sbf[4][9] = 0UL;
	return;
}
/* Population count of the low 32 bits of v.  (The original mask-and-add
 * network used 32-bit masks, so any bits above bit 31 are ignored; this
 * loop preserves that behavior by masking first.) */
unsigned long countBits(unsigned long v)
{
	unsigned long x = v & 0xFFFFFFFFUL;
	unsigned long count = 0;

	while (x)
	{
		count += x & 1UL;
		x >>= 1;
	}
	return count;
}
// Compute the six parity bits D25..D30 for one 30-bit navigation word,
// per the ICD-GPS-200 parity algorithm, returning the completed word
// (data bits complemented when D30* of the previous word is set).
unsigned long computeChecksum(unsigned long source, int nib)
{
	/*
	Layout of `source` (input):
	Bits 31 to 30 = 2 LSBs of the previous transmitted word, D29* and D30*
	Bits 29 to 6 = Source data bits, d1, d2, ..., d24
	Bits 5 to 0 = Empty parity bits
	*/
	/*
	Layout of the return value:
	Bits 31 to 30 = 2 LSBs of the previous transmitted word, D29* and D30*
	Bits 29 to 6 = Data bits transmitted by the SV, D1, D2, ..., D24
	Bits 5 to 0 = Computed parity bits, D25, D26, ..., D30
	*/
	/*
	Parity-equation masks (which data bits feed each parity bit):
	1 2 3
	bit 12 3456 7890 1234 5678 9012 3456 7890
	--- -------------------------------------
	D25 11 1011 0001 1111 0011 0100 1000 0000
	D26 01 1101 1000 1111 1001 1010 0100 0000
	D27 10 1110 1100 0111 1100 1101 0000 0000
	D28 01 0111 0110 0011 1110 0110 1000 0000
	D29 10 1011 1011 0001 1111 0011 0100 0000
	D30 00 1011 0111 1010 1000 1001 1100 0000
	*/
	unsigned long bmask[6] = {
		0x3B1F3480UL, 0x1D8F9A40UL, 0x2EC7CD00UL,
		0x1763E680UL, 0x2BB1F340UL, 0x0B7A89C0UL };
	unsigned long D;
	unsigned long d = source & 0x3FFFFFC0UL;   // 24 data bits, left-justified
	unsigned long D29 = (source>>31)&0x1UL;    // D29* of previous word
	unsigned long D30 = (source>>30)&0x1UL;    // D30* of previous word

	if (nib) // Non-information bearing bits for word 2 and 10
	{
		/*
		Solve bits 23 and 24 to preserve the parity check
		with zeros in bits 29 and 30.
		*/
		if ((D30 + countBits(bmask[4] & d)) % 2)
			d ^= (0x1UL<<6);
		if ((D29 + countBits(bmask[5] & d)) % 2)
			d ^= (0x1UL<<7);
	}

	D = d;
	// Data bits are transmitted complemented when D30* is set.
	if (D30)
		D ^= 0x3FFFFFC0UL;

	// Parity bits D25..D30 from the mask table above.
	D |= ((D29 + countBits(bmask[0] & d)) % 2) << 5;
	D |= ((D30 + countBits(bmask[1] & d)) % 2) << 4;
	D |= ((D29 + countBits(bmask[2] & d)) % 2) << 3;
	D |= ((D30 + countBits(bmask[3] & d)) % 2) << 2;
	D |= ((D30 + countBits(bmask[4] & d)) % 2) << 1;
	D |= ((D29 + countBits(bmask[5] & d)) % 2);

	D &= 0x3FFFFFFFUL;
	//D |= (source & 0xC0000000UL); // Add D29* and D30* from source data bits
	return(D);
}
/* Replace every FORTRAN-style 'D' exponent designator in the first `len`
 * characters of str with 'E' (so atof() can parse the field), and return
 * the number of replacements made. */
int checkExpDesignator(char *str, int len)
{
	int count = 0;
	int k;

	for (k = 0; k < len; k++)
	{
		if (str[k] == 'D')
		{
			str[k] = 'E';
			count++;
		}
	}
	return count;
}
/* Parse one 19-character floating-point field of a RINEX 2 navigation
 * record starting at column `pos`, converting FORTRAN 'D' exponent
 * designators to 'E' so atof() can handle it. */
static double parseRinexField(char *str, int pos)
{
	char tmp[20];

	strncpy(tmp, str+pos, 19);
	tmp[19] = 0;
	checkExpDesignator(tmp, 19);

	return atof(tmp);
}

/* Parse a fixed-width 2-digit integer epoch field at column `pos`. */
static int parseRinexInt2(char *str, int pos)
{
	char tmp[3];

	strncpy(tmp, str+pos, 2);
	tmp[2] = 0;

	return atoi(tmp);
}

/* Read broadcast ephemerides from a RINEX 2 GPS navigation file.
 *
 * Fills eph[] (indexed by PRN-1, MAX_SAT slots); only records within one
 * hour of the first epoch in the file are accepted, and only the first
 * record per satellite.  Returns the number of satellites read, or -1 if
 * the file cannot be opened.
 */
int readRinexNav(ephem_t eph[], char *fname)
{
	FILE *fp;
	int nsat;
	int sv;
	char str[MAX_CHAR];
	char tmp[20];
	datetime_t t;
	gpstime_t g;
	gpstime_t g0;
	double dt;

	if (NULL==(fp=fopen(fname, "rt")))
		return(-1);

	for (sv=0; sv<MAX_SAT; sv++)
		eph[sv].vflg = 0; // Clear valid flag

	// Skip the header.
	while (1)
	{
		if (NULL==fgets(str, MAX_CHAR, fp))
			break;

		if (0==strncmp(str+60, "END OF HEADER", 13))
			break;
	}

	nsat = 0;
	g0.week = -1; // No reference epoch yet.

	while (1)
	{
		// PRN / EPOCH / SV CLK
		if (NULL==fgets(str, MAX_CHAR, fp))
			break;

		t.y = parseRinexInt2(str, 3) + 2000;
		t.m = parseRinexInt2(str, 6);
		t.d = parseRinexInt2(str, 9);
		t.hh = parseRinexInt2(str, 12);
		t.mm = parseRinexInt2(str, 15);

		// Seconds field (F5.1, four characters copied here).
		// BUGFIX: the terminator was previously written at tmp[2],
		// truncating the seconds to two characters and silently
		// dropping the fractional part; terminate after all four
		// copied characters instead.
		strncpy(tmp, str+18, 4);
		tmp[4] = 0;
		t.sec = atof(tmp);

		date2gps(&t, &g);

		if (g0.week==-1)
			g0 = g; // First epoch becomes the reference epoch.

		// Accept only records within one hour of the reference epoch.
		dt = g.sec - g0.sec;
		if ((g.week==g0.week) && (dt>-SECONDS_IN_HOUR) && (dt<=SECONDS_IN_HOUR))
		{
			sv = parseRinexInt2(str, 0) - 1;
			if (sv<0 || sv>=MAX_SAT)
				break; // Malformed PRN field; stop parsing.

			if (eph[sv].vflg==0) // Keep only the first record per SV.
			{
				eph[sv].toc = g;
				eph[sv].af0 = parseRinexField(str, 22);
				eph[sv].af1 = parseRinexField(str, 41);
				eph[sv].af2 = parseRinexField(str, 60);

				// BROADCAST ORBIT - 1
				if (NULL==fgets(str, MAX_CHAR, fp))
					break;
				eph[sv].iode = (int)parseRinexField(str, 3);
				eph[sv].crs = parseRinexField(str, 22);
				eph[sv].deltan = parseRinexField(str, 41);
				eph[sv].m0 = parseRinexField(str, 60);

				// BROADCAST ORBIT - 2
				if (NULL==fgets(str, MAX_CHAR, fp))
					break;
				eph[sv].cuc = parseRinexField(str, 3);
				eph[sv].ecc = parseRinexField(str, 22);
				eph[sv].cus = parseRinexField(str, 41);
				eph[sv].sqrta = parseRinexField(str, 60);

				// BROADCAST ORBIT - 3
				if (NULL==fgets(str, MAX_CHAR, fp))
					break;
				eph[sv].toe.sec = parseRinexField(str, 3);
				eph[sv].cic = parseRinexField(str, 22);
				eph[sv].omg0 = parseRinexField(str, 41);
				eph[sv].cis = parseRinexField(str, 60);

				// BROADCAST ORBIT - 4
				if (NULL==fgets(str, MAX_CHAR, fp))
					break;
				eph[sv].inc0 = parseRinexField(str, 3);
				eph[sv].crc = parseRinexField(str, 22);
				eph[sv].aop = parseRinexField(str, 41);
				eph[sv].omgdot = parseRinexField(str, 60);

				// BROADCAST ORBIT - 5
				if (NULL==fgets(str, MAX_CHAR, fp))
					break;
				eph[sv].idot = parseRinexField(str, 3);
				eph[sv].toe.week = (int)parseRinexField(str, 41);

				// BROADCAST ORBIT - 6
				if (NULL==fgets(str, MAX_CHAR, fp))
					break;
				eph[sv].tgd = parseRinexField(str, 41);
				eph[sv].iodc = (int)parseRinexField(str, 60);

				// BROADCAST ORBIT - 7 (unused; consume the line)
				if (NULL==fgets(str, MAX_CHAR, fp))
					break;

				eph[sv].vflg = 1;
				nsat++;
			}
		}
		else
			break; // Past the window of interest; stop reading.

		// Update the working variables used by satpos().
		eph[sv].A = eph[sv].sqrta * eph[sv].sqrta;
		eph[sv].n = sqrt(GM_EARTH/(eph[sv].A*eph[sv].A*eph[sv].A)) + eph[sv].deltan;
		eph[sv].sq1e2 = sqrt(1.0 - eph[sv].ecc*eph[sv].ecc);
		eph[sv].omgkdot = eph[sv].omgdot - OMEGA_EARTH;
	}

	fclose(fp);

	return(nsat);
}
/* Component-wise 3-vector difference: y = x1 - x2. */
void subVect(double *y, double *x1, double *x2)
{
	int k;

	for (k = 0; k < 3; k++)
		y[k] = x1[k] - x2[k];
}
double normVect(double *x)
{
return(sqrt(x[0]*x[0]+x[1]*x[1]+x[2]*x[2]));
}
/* Inner product of two 3-vectors (accumulated in the same left-to-right
 * order as the original expression). */
double dotProd(double *x1, double *x2)
{
	double acc = 0.0;
	int k;

	for (k = 0; k < 3; k++)
		acc += x1[k]*x2[k];

	return acc;
}
// Compute the pseudorange and range rate from receiver position xyz [m,
// ECEF] to the satellite described by eph at GPS time g: one light-time
// extrapolation step, an Earth-rotation correction, and the SV clock bias.
void computeRange(range_t *rho, ephem_t eph, gpstime_t g, double xyz[])
{
	double pos[3],vel[3],clk[2];
	double los[3];
	double tau;
	double range,rate;
	double xrot,yrot;

	// SV position at time of the pseudorange observation.
	satpos(eph, g, pos, vel, clk);

	// Receiver to satellite vector and light-time.
	subVect(los, pos, xyz);
	tau = normVect(los)/SPEED_OF_LIGHT;

	// Extrapolate the satellite position backwards to the transmission time.
	pos[0] -= vel[0]*tau;
	pos[1] -= vel[1]*tau;
	pos[2] -= vel[2]*tau;

	// Earth rotation correction. The change in velocity can be neglected.
	// (Small-angle rotation of the ECEF frame by OMEGA_EARTH*tau.)
	xrot = pos[0] + pos[1]*OMEGA_EARTH*tau;
	yrot = pos[1] - pos[0]*OMEGA_EARTH*tau;
	pos[0] = xrot;
	pos[1] = yrot;

	// New observer to satellite vector and satellite range.
	subVect(los, pos, xyz);
	range = normVect(los);

	// Pseudorange (geometric range minus the SV clock bias).
	rho->range = range - SPEED_OF_LIGHT*clk[0];

	// Relative velocity of SV and receiver, projected on the line of sight.
	rate = dotProd(vel, los)/range;

	// Pseudorange rate (clock-drift term deliberately left out).
	rho->rate = rate; // - SPEED_OF_LIGHT*clk[1];

	// Time of application
	rho->g = g;

	return;
}
// Initialize a channel's carrier/code frequencies, code phase, and
// navigation-message word/bit/code counters from two pseudorange
// observations taken dt seconds apart.
void computeCodePhase(channel_t *chan, range_t rho0, range_t rho1, double dt)
{
	double ms;
	int ims;
	double rhorate;

	// Pseudorange rate.
	rhorate = (rho1.range - rho0.range)/dt;

	// Carrier and code frequency (carrier Doppler scaled onto the code rate).
	chan->f_carr = -rhorate/LAMBDA_L1;
	chan->f_code = CODE_FREQ + chan->f_carr*CARR_TO_CODE;

	// Initial code phase and data bit counters.
	// NOTE(review): the +6.0 s offset looks like the one-subframe head
	// start of the nav message relative to g0 — confirm against the caller.
	ms = (((rho0.g.sec-chan->g0.sec)+6.0) - rho0.range/SPEED_OF_LIGHT)*1000.0;

	ims = (int)ms;
	chan->code_phase = (ms-(double)ims)*1023.0; // in chip

	chan->iword = ims/600; // 1 word = 30 bits = 600 ms
	ims -= chan->iword*600;

	chan->ibit = ims/20; // 1 bit = 20 code = 20 ms
	ims -= chan->ibit*20;

	chan->icode = ims; // 1 code = 1 ms

	// Current chip and nav bit, remapped from 0/1 to -1/+1.
	chan->codeCA = chan->ca[(int)chan->code_phase]*2-1;
	chan->dataBit = (int)((chan->dwrd[chan->iword]>>(29-chan->ibit)) & 0x1UL)*2-1;

	return;
}
/* Read receiver positions from a CSV user-motion file with rows of
 * "t,x,y,z" (the time column is parsed but ignored).
 *
 * Returns the number of rows read (capped at USER_MOTION_SIZE), or -1 if
 * the file cannot be opened.
 */
int readUserMotion(double xyz[USER_MOTION_SIZE][3], char *filename)
{
	FILE *fp;
	int numd;
	double t,x,y,z;

	if (NULL==(fp=fopen(filename,"rt")))
		return(-1);

	for (numd=0; numd<USER_MOTION_SIZE; numd++)
	{
		// BUGFIX: require all four fields. The old EOF-only check let a
		// short or malformed row pass through without consuming input,
		// duplicating stale coordinates until the array filled up.
		if (4!=fscanf(fp, "%lf,%lf,%lf,%lf", &t, &x, &y, &z))
			break;

		xyz[numd][0] = x;
		xyz[numd][1] = y;
		xyz[numd][2] = z;
	}

	fclose(fp);

	return (numd);
}
// Print the command-line help text to stdout.
void usage(void)
{
	printf("Usage: gps-sdr-sim [options]\n"
		"Options:\n"
		" -e <gps_nav> RINEX navigation file for GPS ephemerides (required)\n"
		" -u <user_motion> User motion file (dynamic mode)\n"
		" -l <location> Lat,Lon,Hgt (static mode) e.g. 30.286502,120.032669,100\n"
		" -o <output> I/Q sampling data file (default: gpssim.bin)\n"
		" -s <frequency> Sampling frequency [Hz] (default: 2600000)\n"
		" -b <iq_bits> I/Q data format [8/16] (default: 8)\n");

	return;
}
/*
 * Program entry point for the GPS signal simulator.
 *
 * Pipeline: parse command-line options -> load the receiver trajectory
 * (user-motion CSV or a static lat/lon/hgt) -> read RINEX ephemerides ->
 * select satellites above the elevation mask -> build the navigation message
 * subframes -> synthesize per-channel baseband I/Q samples in 0.1 s batches,
 * sum the channels, and write them to the output file.
 * Returns 0 on success; exits with status 1 on any fatal error.
 */
int main(int argc, char *argv[])
{
clock_t tstart,tend;
FILE *fp;
int sv;
int neph;
ephem_t eph[MAX_SAT];
gpstime_t g0;
double llh[3];
double pos[3],vel[3],clk[2];
double tmat[3][3];
double los[3];
double neu[3];
double azel[2];
int i;
int nsat;
channel_t chan[MAX_CHAN];
double elvmask = 0.0/R2D;
int isbf,iwrd;
unsigned long tow;
unsigned long sbf[5][10];
unsigned long sbfwrd;
unsigned long prevwrd;
int nib;
#ifdef _SINE_LUT
int ip,qp;
int iTable;
#else
double ip,qp;
#endif
void *iq_buff = NULL;
gpstime_t grx;
range_t rho0[MAX_SAT];
double delt;
int isamp;
int iumd;
int numd;
char umfile[MAX_CHAR];
bool staticLocationMode = false;
double xyz[USER_MOTION_SIZE][3];
char navfile[MAX_CHAR];
char outfile[MAX_CHAR];
double samp_freq;
int iq_buff_size;
int data_format;
int result;
////////////////////////////////////////////////////////////
// Read options
////////////////////////////////////////////////////////////
// Default options
navfile[0] = 0;
umfile[0] = 0;
strcpy(outfile, "gpssim.bin");
samp_freq = 2.6e6;
data_format = SC08;
if (argc<3)
{
usage();
exit(1);
}
// NOTE(review): optarg is copied unchecked into MAX_CHAR buffers below;
// an argument longer than MAX_CHAR-1 overflows them -- consider bounding.
while ((result=getopt(argc,argv,"e:u:l:o:s:b:"))!=-1)
{
switch (result)
{
case 'e':
strcpy(navfile, optarg);
break;
case 'u':
strcpy(umfile, optarg);
break;
case 'l':
// Static geodetic coordinates input mode
// Added by scateu@gmail.com
staticLocationMode = true;
sscanf(optarg,"%lf,%lf,%lf",&llh[0],&llh[1],&llh[2]);
llh[0] = llh[0] / R2D; // convert to RAD
llh[1] = llh[1] / R2D; // convert to RAD
break;
case 'o':
strcpy(outfile, optarg);
break;
case 's':
samp_freq = atof(optarg);
if (samp_freq<1.0e6)
{
printf("Invalid sampling frequency.\n");
exit(1);
}
break;
case 'b':
data_format = atoi(optarg);
if (data_format!=SC08 && data_format!=SC16)
{
printf("Invalid data format.\n");
exit(1);
}
break;
case ':':
case '?':
usage();
exit(1);
default:
break;
}
}
if (navfile[0]==0)
{
printf("GPS ephemeris file is not specified.\n");
exit(1);
}
if (umfile[0]==0 && !staticLocationMode)
{
printf("User motion file is not specified.\n");
printf("Or you may use -l to specify llh coordinate directly.\n");
exit(1);
}
// Buffer size
// Round the sample rate down to a multiple of 10 Hz so that one 0.1 s
// batch corresponds to an integer number of samples (iq_buff_size).
samp_freq = floor(samp_freq/10.0);
iq_buff_size = (int)samp_freq; // samples per 0.1sec
samp_freq *= 10.0;
delt = 1.0/samp_freq;
////////////////////////////////////////////////////////////
// Receiver position
////////////////////////////////////////////////////////////
if (!staticLocationMode)
{
// Read user motion file
numd = readUserMotion(xyz, umfile);
if (numd==-1)
{
printf("Failed to open user motion file.\n");
exit(1);
}
else if (numd==0)
{
printf("Failed to read user motion data.\n");
exit(1);
}
printf("User motion data = %d\n", numd);
// Initial location in Geodetic coordinate system
xyz2llh(xyz[0], llh);
}
else
{
// Static geodetic coordinates input mode: "-l"
// Added by scateu@gmail.com
// The single fixed position is replicated across the whole trajectory.
printf("Using static location mode.\n");
llh2xyz(llh,xyz[0]); // Convert llh to xyz
numd = USER_MOTION_SIZE;
for (iumd=1; iumd<numd; iumd++)
{
xyz[iumd][0] = xyz[0][0];
xyz[iumd][1] = xyz[0][1];
xyz[iumd][2] = xyz[0][2];
}
}
printf("xyz = %11.1f, %11.1f, %11.1f\n", xyz[0][0], xyz[0][1], xyz[0][2]);
printf("llh = %11.6f, %11.6f, %11.1f\n", llh[0]*R2D, llh[1]*R2D, llh[2]);
////////////////////////////////////////////////////////////
// Read ephemeris
////////////////////////////////////////////////////////////
neph = readRinexNav(eph, navfile);
if (neph==-1)
{
printf("Failed to open ephemeris file.\n");
exit(1);
}
// Pick the first valid ephemeris' time-of-ephemeris as the start epoch.
g0.week = -1;
if (neph>0)
{
for (sv=0; sv<MAX_SAT; sv++)
{
if (g0.week<0 && eph[sv].vflg==1)
{
g0 = eph[sv].toe; // Set simulation start time
break;
}
}
}
g0.sec = (double)(((unsigned long)g0.sec)/30UL) * 30.0; // align with the full frame length = 30 sec
printf("Start Time = %4d:%.1f\n", g0.week, g0.sec);
////////////////////////////////////////////////////////////
// Check visible satellites
////////////////////////////////////////////////////////////
for (i=0; i<MAX_CHAN; i++)
chan[i].prn = 0;
ltcmat(llh, tmat);
nsat = 0;
// NOTE(review): nsat is not clamped to MAX_CHAN; if more satellites clear
// the elevation mask than there are channels, chan[nsat] writes out of
// bounds -- verify MAX_CHAN covers the worst case.
for (sv=0; sv<MAX_SAT; sv++)
{
if (eph[sv].vflg==1)
{
satpos(eph[sv], g0, pos, vel, clk);
subVect(los, pos, xyz[0]);
ecef2neu(los, tmat, neu);
neu2azel(azel, neu);
if (azel[1]>elvmask)
{
chan[nsat].prn = sv+1;
nsat++;
printf("%02d %6.1f %5.1f\n", sv+1, azel[0]*R2D, azel[1]*R2D);
}
}
}
printf("Number of channels = %d\n", nsat);
////////////////////////////////////////////////////////////
// Baseband signal buffer and output file
////////////////////////////////////////////////////////////
// Allocate I/Q buffer
if (data_format==SC08)
iq_buff = (signed char *)calloc(2*iq_buff_size, 1);
else
iq_buff = (short *)calloc(2*iq_buff_size, 2);
if (iq_buff==NULL)
{
printf("Faild to allocate IQ buffer.\n");
exit(1);
}
// Open output file
if (NULL==(fp=fopen(outfile,"wb")))
{
printf("Failed to open output file.\n");
exit(1);
}
////////////////////////////////////////////////////////////
// Initialize channels
////////////////////////////////////////////////////////////
// Initial reception time
grx = g0;
for (i=0; i<nsat; i++)
{
// C/A code generation
codegen(chan[i].ca, chan[i].prn);
// Allocate I/Q buffer
chan[i].iq_buff = (short *)calloc(2*iq_buff_size, 2);
if (chan[i].iq_buff==NULL)
{
printf("Faild to allocate IQ buffer.\n");
exit(1);
}
}
// Initialize carrier phase
// The fractional carrier phase at the receiver is derived from the range
// at a reference point and propagated to the actual receiver position.
for (i=0; i<nsat; i++)
{
range_t tmp;
double ref[3]={0.0};
double phase_offset,phase_offset_time;
double phase_ini,phase_ini_time;
sv = chan[i].prn-1;
computeRange(&tmp, eph[sv], grx, ref);
phase_offset_time = grx.sec - tmp.range/SPEED_OF_LIGHT;
phase_offset = tmp.range/LAMBDA_L1;
phase_offset -= floor(phase_offset);
computeRange(&tmp, eph[sv], grx, xyz[0]);
phase_ini_time = grx.sec - tmp.range/SPEED_OF_LIGHT;
phase_ini = phase_offset + (phase_ini_time - phase_offset_time)*SPEED_OF_LIGHT/LAMBDA_L1;
phase_ini -= floor(phase_ini);
chan[i].carr_phase = phase_ini;
}
////////////////////////////////////////////////////////////
// Generate subframes and data bits
////////////////////////////////////////////////////////////
for (i=0; i<nsat; i++)
{
sv = chan[i].prn-1;
eph2sbf(eph[sv], sbf);
chan[i].g0 = g0; // Data bit reference time
tow = ((unsigned long)g0.sec)/6UL;
prevwrd = 0UL;
for (isbf=0; isbf<N_SBF; isbf++)
{
for (iwrd=0; iwrd<10; iwrd++)
{
sbfwrd = sbf[(isbf+4)%5][iwrd]; // Start from subframe 5
// Add TOW-count message into HOW
if (iwrd==1)
sbfwrd |= ((tow&0x1FFFFUL)<<13);
// Compute checksum
sbfwrd |= (prevwrd<<30) & 0xC0000000UL; // 2 LSBs of the previous transmitted word
nib = ((iwrd==1)||(iwrd==9))?1:0; // Non-information bearing bits for word 2 and 10
chan[i].dwrd[isbf*10+iwrd] = computeChecksum(sbfwrd, nib);
prevwrd = chan[i].dwrd[isbf*10+iwrd];
}
tow++; // Next subframe
}
}
////////////////////////////////////////////////////////////
// Generate baseband signals
////////////////////////////////////////////////////////////
tstart = clock();
printf("Generating baseband signals...\n");
printf("\rTime = %4.1f", grx.sec-g0.sec);
fflush(stdout);
//
// Generate I/Q samples for every user motion data
//
// Initial pseudorange
for (i=0; i<nsat; i++)
{
sv = chan[i].prn-1;
computeRange(&rho0[sv], eph[sv], grx, xyz[0]);
}
// Update receiver time
grx.sec += 0.1;
for (iumd=1; iumd<numd; iumd++)
{
//#pragma omp parallel for private(isamp) // !!!FIXME!!! The current code runs faster without OpenMP support
// Properties -> Configuration Properties -> C/C++ -> Language -> Open MP Support -> Yes (/openmp)
for (i=0; i<nsat; i++)
{
// Refresh code phase and data bit counters
int sv = chan[i].prn-1;
range_t rho;
// Current pseudorange
computeRange(&rho, eph[sv], grx, xyz[iumd]);
// Update code phase and data bit counters
computeCodePhase(&chan[i], rho0[sv], rho, 0.1);
// Save current pseudorange
rho0[sv] = rho;
for (isamp=0; isamp<iq_buff_size; isamp++)
{
#ifdef _SINE_LUT
iTable = (int)floor(chan[i].carr_phase*512.0);
ip = chan[i].dataBit * chan[i].codeCA * cosTable512[iTable];
qp = chan[i].dataBit * chan[i].codeCA * sinTable512[iTable];
// Store I/Q samples into buffer
chan[i].iq_buff[isamp*2] = (short)ip;
chan[i].iq_buff[isamp*2+1] = (short)qp;
#else
ip = chan[i].dataBit * chan[i].codeCA * cos(2.0*PI*chan[i].carr_phase);
qp = chan[i].dataBit * chan[i].codeCA * sin(2.0*PI*chan[i].carr_phase);
// Store I/Q samples into buffer
chan[i].iq_buff[isamp*2] = (short)(ADC_GAIN*ip);
chan[i].iq_buff[isamp*2+1] = (short)(ADC_GAIN*qp);
#endif
// Update code phase
chan[i].code_phase += chan[i].f_code * delt;
if (chan[i].code_phase>=1023.0)
{
chan[i].code_phase -= 1023.0;
chan[i].icode++;
if (chan[i].icode>=20) // 20 C/A codes = 1 navigation data bit
{
chan[i].icode = 0;
chan[i].ibit++;
if (chan[i].ibit>=30) // 30 navigation data bits = 1 word
{
chan[i].ibit = 0;
chan[i].iword++;
}
// Set new navigation data bit
chan[i].dataBit = (int)((chan[i].dwrd[chan[i].iword]>>(29-chan[i].ibit)) & 0x1UL)*2-1;
}
}
// Set current code chip
chan[i].codeCA = chan[i].ca[(int)chan[i].code_phase]*2-1;
// Update carrier phase
chan[i].carr_phase += chan[i].f_carr * delt;
if (chan[i].carr_phase>=1.0)
chan[i].carr_phase -= 1.0;
else if (chan[i].carr_phase<0.0)
chan[i].carr_phase += 1.0;
}
} // End of omp parallel for
// Mix all channels into the shared output buffer, then write one batch.
if (data_format==SC08)
{
for (isamp=0; isamp<2*iq_buff_size; isamp++)
{
signed char sample = 0;
for (i=0; i<nsat; i++)
sample += (signed char)(chan[i].iq_buff[isamp]>>4); // 12-bit bladeRF -> 8-bit HackRF
((signed char*)iq_buff)[isamp] = sample;
}
fwrite(iq_buff, 1, 2*iq_buff_size, fp);
}
else
{
for (isamp=0; isamp<2*iq_buff_size; isamp++)
{
short sample = 0;
for (i=0; i<nsat; i++)
sample += chan[i].iq_buff[isamp];
((short*)iq_buff)[isamp] = sample;
}
fwrite(iq_buff, 2, 2*iq_buff_size, fp);
}
// Update receiver time
grx.sec += 0.1;
// Update time counter
printf("\rTime = %4.1f", grx.sec-g0.sec);
fflush(stdout);
}
tend = clock();
printf("\nDone!\n");
// Free I/Q buffer
free(iq_buff);
for (i=0; i<nsat; i++)
free(chan[i].iq_buff);
// Close file
fclose(fp);
// Process time
printf("Process time = %.3f[sec]\n", (double)(tend-tstart)/CLOCKS_PER_SEC);
return(0);
}
|
joseph3d_back_tof_lm_2.c | /**
* @file joseph3d_back_tof_lm_2.c
*/
#include<stdio.h>
#include<stdlib.h>
#include<stdint.h>
#include<math.h>
#include<omp.h>
#include "tof_utils.h"
#include "ray_cube_intersection.h"
/** @brief 3D listmode tof joseph back projector
*
* All threads back project in one image using openmp's atomic add.
*
* @param xstart array of shape [3*nlors] with the coordinates of the start points of the LORs.
* The start coordinates of the n-th LOR are at xstart[n*3 + i] with i = 0,1,2
* @param xend array of shape [3*nlors] with the coordinates of the end points of the LORs.
* The start coordinates of the n-th LOR are at xstart[n*3 + i] with i = 0,1,2
* @param img array of shape [n0*n1*n2] containing the 3D image used for back projection (output).
* The pixel [i,j,k] ist stored at [n1*n2*i + n2*j + k].
* @param img_origin array [x0_0,x0_1,x0_2] of coordinates of the center of the [0,0,0] voxel
* @param voxsize array [vs0, vs1, vs2] of the voxel sizes
* @param p array of length nlors with the values to be back projected
* @param nlors number of geometrical LORs
* @param img_dim array with dimensions of image [n0,n1,n2]
* @param tofbin_width width of the TOF bins in spatial units (units of xstart and xend)
* @param sigma_tof array of length nlors with the TOF resolution (sigma) for each LOR in
* spatial units (units of xstart and xend)
* @param tofcenter_offset array of length nlors with the offset of the central TOF bin from the
* midpoint of each LOR in spatial units (units of xstart and xend)
* @param n_sigmas number of sigmas to consider for calculation of TOF kernel
* @param tof_bin array containing the TOF bin of each event
*/
void joseph3d_back_tof_lm_2(const float *xstart,
const float *xend,
float *img,
const float *img_origin,
const float *voxsize,
const float *p,
long long nlors,
const int *img_dim,
float tofbin_width,
const float *sigma_tof,
const float *tofcenter_offset,
float n_sigmas,
const short *tof_bin)
{
long long i;
int n0 = img_dim[0];
int n1 = img_dim[1];
int n2 = img_dim[2];
// One loop iteration per listmode event; all threads accumulate into the
// shared image, made safe by the per-update "omp atomic" below.
# pragma omp parallel for schedule(static)
for(i = 0; i < nlors; i++)
{
float d0, d1, d2, d0_sq, d1_sq, d2_sq;
float cs0, cs1, cs2, cf;
float lsq, cos0_sq, cos1_sq, cos2_sq;
unsigned short direction;
int i0, i1, i2;
int i0_floor, i1_floor, i2_floor;
int i0_ceil, i1_ceil, i2_ceil;
float x_pr0, x_pr1, x_pr2;
float tmp_0, tmp_1, tmp_2;
float u0, u1, u2, d_norm;
float x_m0, x_m1, x_m2;
float x_v0, x_v1, x_v2;
short it = tof_bin[i];
float dtof, tw;
float sig_tof = sigma_tof[i];
float tc_offset = tofcenter_offset[i];
float xstart0 = xstart[i*3 + 0];
float xstart1 = xstart[i*3 + 1];
float xstart2 = xstart[i*3 + 2];
float xend0 = xend[i*3 + 0];
float xend1 = xend[i*3 + 1];
float xend2 = xend[i*3 + 2];
float voxsize0 = voxsize[0];
float voxsize1 = voxsize[1];
float voxsize2 = voxsize[2];
float img_origin0 = img_origin[0];
float img_origin1 = img_origin[1];
float img_origin2 = img_origin[2];
unsigned char intersec;
float t1, t2;
float istart_f, iend_f, tmp;
int istart, iend;
float istart_tof_f, iend_tof_f;
int istart_tof, iend_tof;
// test whether the ray between the two detectors is most parallel
// with the 0, 1, or 2 axis
d0 = xend0 - xstart0;
d1 = xend1 - xstart1;
d2 = xend2 - xstart2;
//-----------
//--- test whether ray and cube intersect
// The cube is grown by one voxel on the low side -- presumably to keep the
// bilinear footprint of boundary planes inside the test; TODO confirm.
intersec = ray_cube_intersection(xstart0, xstart1, xstart2,
img_origin0 - 1*voxsize0, img_origin1 - 1*voxsize1, img_origin2 - 1*voxsize2,
img_origin0 + n0*voxsize0, img_origin1 + n1*voxsize1, img_origin2 + n2*voxsize2,
d0, d1, d2, &t1, &t2);
if (intersec == 1)
{
d0_sq = d0*d0;
d1_sq = d1*d1;
d2_sq = d2*d2;
lsq = d0_sq + d1_sq + d2_sq;
cos0_sq = d0_sq / lsq;
cos1_sq = d1_sq / lsq;
cos2_sq = d2_sq / lsq;
cs0 = sqrtf(cos0_sq);
cs1 = sqrtf(cos1_sq);
cs2 = sqrtf(cos2_sq);
// direction = index of the axis the ray is most parallel to (ties
// resolved in favor of the higher axis index)
direction = 0;
if ((cos1_sq >= cos0_sq) && (cos1_sq >= cos2_sq))
{
direction = 1;
}
if ((cos2_sq >= cos0_sq) && (cos2_sq >= cos1_sq))
{
direction = 2;
}
//---------------------------------------------------------
//--- calculate TOF related quantities
// unit vector (u0,u1,u2) that points from xstart to end
d_norm = sqrtf(lsq);
u0 = d0 / d_norm;
u1 = d1 / d_norm;
u2 = d2 / d_norm;
// calculate mid point of LOR
x_m0 = 0.5f*(xstart0 + xend0);
x_m1 = 0.5f*(xstart1 + xend1);
x_m2 = 0.5f*(xstart2 + xend2);
//---------------------------------------------------------
// The three branches below are structurally identical up to a cyclic
// permutation of the axes; only the stepping axis changes.
if(direction == 0)
{
// case where ray is most parallel to the 0 axis
// we step through the volume along the 0 direction
// factor for correctiong voxel size and |cos(theta)|
cf = voxsize0/cs0;
//--- check where ray enters / leaves cube
istart_f = (xstart0 + t1*d0 - img_origin0) / voxsize0;
iend_f = (xstart0 + t2*d0 - img_origin0) / voxsize0;
if (istart_f > iend_f){
tmp = iend_f;
iend_f = istart_f;
istart_f = tmp;
}
istart = (int)floor(istart_f);
iend = (int)ceil(iend_f);
//-- check where we should start and stop according to the TOF kernel
//-- the tof weights outside +- 3 sigma will be close to 0 so we can
//-- ignore them
istart_tof_f = (x_m0 + (it*tofbin_width - n_sigmas*sig_tof)*u0 - img_origin0) / voxsize0;
iend_tof_f = (x_m0 + (it*tofbin_width + n_sigmas*sig_tof)*u0 - img_origin0) / voxsize0;
if (istart_tof_f > iend_tof_f){
tmp = iend_tof_f;
iend_tof_f = istart_tof_f;
istart_tof_f = tmp;
}
istart_tof = (int)floor(istart_tof_f);
iend_tof = (int)ceil(iend_tof_f);
// intersect the geometric range with the TOF range
if(istart_tof > istart){istart = istart_tof;}
if(iend_tof < iend){iend = iend_tof;}
//-----------
// clamp to the valid voxel range (iend is exclusive)
if (istart < 0){istart = 0;}
if (iend >= n0){iend = n0;}
//---
for(i0 = istart; i0 < iend; i0++)
{
// get the indices where the ray intersects the image plane
x_pr1 = xstart1 + (img_origin0 + i0*voxsize0 - xstart0)*d1 / d0;
x_pr2 = xstart2 + (img_origin0 + i0*voxsize0 - xstart0)*d2 / d0;
i1_floor = (int)floor((x_pr1 - img_origin1)/voxsize1);
i1_ceil = i1_floor + 1;
i2_floor = (int)floor((x_pr2 - img_origin2)/voxsize2);
i2_ceil = i2_floor + 1;
// calculate the distances to the floor normalized to [0,1]
// for the bilinear interpolation
tmp_1 = (x_pr1 - (i1_floor*voxsize1 + img_origin1)) / voxsize1;
tmp_2 = (x_pr2 - (i2_floor*voxsize2 + img_origin2)) / voxsize2;
//--------- TOF related quantities
// calculate the voxel center needed for TOF weights
x_v0 = img_origin0 + i0*voxsize0;
x_v1 = x_pr1;
x_v2 = x_pr2;
// skip events with nothing to back project
if(p[i] != 0){
// calculate distance of voxel to tof bin center
dtof = sqrtf(powf((x_m0 + (it*tofbin_width + tc_offset)*u0 - x_v0), 2) +
powf((x_m1 + (it*tofbin_width + tc_offset)*u1 - x_v1), 2) +
powf((x_m2 + (it*tofbin_width + tc_offset)*u2 - x_v2), 2));
//calculate the TOF weight
tw = 0.5f*(erff_as((dtof + 0.5f*tofbin_width)/(sqrtf(2)*sig_tof)) -
erff_as((dtof - 0.5f*tofbin_width)/(sqrtf(2)*sig_tof)));
// scatter the TOF-weighted value to the 4 bilinear neighbors
if ((i1_floor >= 0) && (i1_floor < n1) && (i2_floor >= 0) && (i2_floor < n2))
{
#pragma omp atomic
img[n1*n2*i0 + n2*i1_floor + i2_floor] +=
(tw * p[i] * (1 - tmp_1) * (1 - tmp_2) * cf);
}
if ((i1_ceil >= 0) && (i1_ceil < n1) && (i2_floor >= 0) && (i2_floor < n2))
{
#pragma omp atomic
img[n1*n2*i0 + n2*i1_ceil + i2_floor] +=
(tw * p[i] * tmp_1 * (1 - tmp_2) * cf);
}
if ((i1_floor >= 0) && (i1_floor < n1) && (i2_ceil >= 0) && (i2_ceil < n2))
{
#pragma omp atomic
img[n1*n2*i0 + n2*i1_floor + i2_ceil] +=
(tw * p[i] * (1 - tmp_1) * tmp_2*cf);
}
if ((i1_ceil >= 0) && (i1_ceil < n1) && (i2_ceil >= 0) && (i2_ceil < n2))
{
#pragma omp atomic
img[n1*n2*i0 + n2*i1_ceil + i2_ceil] +=
(tw * p[i] * tmp_1 * tmp_2 * cf);
}
}
}
}
// ---------------------------------------------------------------------------------
if(direction == 1)
{
// case where ray is most parallel to the 1 axis
// we step through the volume along the 1 direction
// factor for correctiong voxel size and |cos(theta)|
cf = voxsize1/cs1;
//--- check where ray enters / leaves cube
istart_f = (xstart1 + t1*d1 - img_origin1) / voxsize1;
iend_f = (xstart1 + t2*d1 - img_origin1) / voxsize1;
if (istart_f > iend_f){
tmp = iend_f;
iend_f = istart_f;
istart_f = tmp;
}
istart = (int)floor(istart_f);
iend = (int)ceil(iend_f);
//-- check where we should start and stop according to the TOF kernel
//-- the tof weights outside +- 3 sigma will be close to 0 so we can
//-- ignore them
istart_tof_f = (x_m1 + (it*tofbin_width - n_sigmas*sig_tof)*u1 - img_origin1) / voxsize1;
iend_tof_f = (x_m1 + (it*tofbin_width + n_sigmas*sig_tof)*u1 - img_origin1) / voxsize1;
if (istart_tof_f > iend_tof_f){
tmp = iend_tof_f;
iend_tof_f = istart_tof_f;
istart_tof_f = tmp;
}
istart_tof = (int)floor(istart_tof_f);
iend_tof = (int)ceil(iend_tof_f);
if(istart_tof > istart){istart = istart_tof;}
if(iend_tof < iend){iend = iend_tof;}
//-----------
if (istart < 0){istart = 0;}
if (iend >= n1){iend = n1;}
//---
for(i1 = istart; i1 < iend; i1++)
{
// get the indices where the ray intersects the image plane
x_pr0 = xstart0 + (img_origin1 + i1*voxsize1 - xstart1)*d0 / d1;
x_pr2 = xstart2 + (img_origin1 + i1*voxsize1 - xstart1)*d2 / d1;
i0_floor = (int)floor((x_pr0 - img_origin0)/voxsize0);
i0_ceil = i0_floor + 1;
i2_floor = (int)floor((x_pr2 - img_origin2)/voxsize2);
i2_ceil = i2_floor + 1;
// calculate the distances to the floor normalized to [0,1]
// for the bilinear interpolation
tmp_0 = (x_pr0 - (i0_floor*voxsize0 + img_origin0)) / voxsize0;
tmp_2 = (x_pr2 - (i2_floor*voxsize2 + img_origin2)) / voxsize2;
//--------- TOF related quantities
// calculate the voxel center needed for TOF weights
x_v0 = x_pr0;
x_v1 = img_origin1 + i1*voxsize1;
x_v2 = x_pr2;
if(p[i] != 0){
// calculate distance of voxel to tof bin center
dtof = sqrtf(powf((x_m0 + (it*tofbin_width + tc_offset)*u0 - x_v0), 2) +
powf((x_m1 + (it*tofbin_width + tc_offset)*u1 - x_v1), 2) +
powf((x_m2 + (it*tofbin_width + tc_offset)*u2 - x_v2), 2));
//calculate the TOF weight
tw = 0.5f*(erff_as((dtof + 0.5f*tofbin_width)/(sqrtf(2)*sig_tof)) -
erff_as((dtof - 0.5f*tofbin_width)/(sqrtf(2)*sig_tof)));
if ((i0_floor >= 0) && (i0_floor < n0) && (i2_floor >= 0) && (i2_floor < n2))
{
#pragma omp atomic
img[n1*n2*i0_floor + n2*i1 + i2_floor] +=
(tw * p[i] * (1 - tmp_0) * (1 - tmp_2) * cf);
}
if ((i0_ceil >= 0) && (i0_ceil < n0) && (i2_floor >= 0) && (i2_floor < n2))
{
#pragma omp atomic
img[n1*n2*i0_ceil + n2*i1 + i2_floor] +=
(tw * p[i] * tmp_0 * (1 - tmp_2) * cf);
}
if ((i0_floor >= 0) && (i0_floor < n0) && (i2_ceil >= 0) && (i2_ceil < n2))
{
#pragma omp atomic
img[n1*n2*i0_floor + n2*i1 + i2_ceil] +=
(tw * p[i] * (1 - tmp_0) * tmp_2 * cf);
}
if((i0_ceil >= 0) && (i0_ceil < n0) && (i2_ceil >= 0) && (i2_ceil < n2))
{
#pragma omp atomic
img[n1*n2*i0_ceil + n2*i1 + i2_ceil] +=
(tw * p[i] * tmp_0 * tmp_2 * cf);
}
}
}
}
//---------------------------------------------------------------------------------
if (direction == 2)
{
// case where ray is most parallel to the 2 axis
// we step through the volume along the 2 direction
// factor for correctiong voxel size and |cos(theta)|
cf = voxsize2/cs2;
//--- check where ray enters / leaves cube
istart_f = (xstart2 + t1*d2 - img_origin2) / voxsize2;
iend_f = (xstart2 + t2*d2 - img_origin2) / voxsize2;
if (istart_f > iend_f){
tmp = iend_f;
iend_f = istart_f;
istart_f = tmp;
}
istart = (int)floor(istart_f);
iend = (int)ceil(iend_f);
//-- check where we should start and stop according to the TOF kernel
//-- the tof weights outside +- 3 sigma will be close to 0 so we can
//-- ignore them
istart_tof_f = (x_m2 + (it*tofbin_width - n_sigmas*sig_tof)*u2 - img_origin2) / voxsize2;
iend_tof_f = (x_m2 + (it*tofbin_width + n_sigmas*sig_tof)*u2 - img_origin2) / voxsize2;
if (istart_tof_f > iend_tof_f){
tmp = iend_tof_f;
iend_tof_f = istart_tof_f;
istart_tof_f = tmp;
}
istart_tof = (int)floor(istart_tof_f);
iend_tof = (int)ceil(iend_tof_f);
if(istart_tof > istart){istart = istart_tof;}
if(iend_tof < iend){iend = iend_tof;}
//-----------
if (istart < 0){istart = 0;}
if (iend >= n2){iend = n2;}
//---
for(i2 = istart; i2 < iend; i2++)
{
// get the indices where the ray intersects the image plane
x_pr0 = xstart0 + (img_origin2 + i2*voxsize2 - xstart2)*d0 / d2;
x_pr1 = xstart1 + (img_origin2 + i2*voxsize2 - xstart2)*d1 / d2;
i0_floor = (int)floor((x_pr0 - img_origin0)/voxsize0);
i0_ceil = i0_floor + 1;
i1_floor = (int)floor((x_pr1 - img_origin1)/voxsize1);
i1_ceil = i1_floor + 1;
// calculate the distances to the floor normalized to [0,1]
// for the bilinear interpolation
tmp_0 = (x_pr0 - (i0_floor*voxsize0 + img_origin0)) / voxsize0;
tmp_1 = (x_pr1 - (i1_floor*voxsize1 + img_origin1)) / voxsize1;
//--------- TOF related quantities
// calculate the voxel center needed for TOF weights
x_v0 = x_pr0;
x_v1 = x_pr1;
x_v2 = img_origin2 + i2*voxsize2;
if(p[i] != 0){
// calculate distance of voxel to tof bin center
dtof = sqrtf(powf((x_m0 + (it*tofbin_width + tc_offset)*u0 - x_v0), 2) +
powf((x_m1 + (it*tofbin_width + tc_offset)*u1 - x_v1), 2) +
powf((x_m2 + (it*tofbin_width + tc_offset)*u2 - x_v2), 2));
//calculate the TOF weight
tw = 0.5f*(erff_as((dtof + 0.5f*tofbin_width)/(sqrtf(2)*sig_tof)) -
erff_as((dtof - 0.5f*tofbin_width)/(sqrtf(2)*sig_tof)));
if ((i0_floor >= 0) && (i0_floor < n0) && (i1_floor >= 0) && (i1_floor < n1))
{
#pragma omp atomic
img[n1*n2*i0_floor + n2*i1_floor + i2] +=
(tw * p[i] * (1 - tmp_0) * (1 - tmp_1) * cf);
}
if ((i0_ceil >= 0) && (i0_ceil < n0) && (i1_floor >= 0) && (i1_floor < n1))
{
#pragma omp atomic
img[n1*n2*i0_ceil + n2*i1_floor + i2] +=
(tw * p[i] * tmp_0 * (1 - tmp_1) * cf);
}
if ((i0_floor >= 0) && (i0_floor < n0) && (i1_ceil >= 0) && (i1_ceil < n1))
{
#pragma omp atomic
img[n1*n2*i0_floor + n2*i1_ceil + i2] +=
(tw * p[i] * (1 - tmp_0) * tmp_1 * cf);
}
if ((i0_ceil >= 0) && (i0_ceil < n0) && (i1_ceil >= 0) && (i1_ceil < n1))
{
#pragma omp atomic
img[n1*n2*i0_ceil + n2*i1_ceil + i2] +=
(tw * p[i] * tmp_0 * tmp_1 * cf);
}
}
}
}
}
}
}
|
particlefilter.c | /**
* @file ex_particle_OPENMP_seq.c
* @author Michael Trotter & Matt Goodrum
* @brief Particle filter implementation in C/OpenMP
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <time.h>
// RISC-V VECTOR Version by Cristóbal Ramírez Lazo, "Barcelona 2019"
#ifdef USE_RISCV_VECTOR
#include "../../common/vector_defines.h"
#endif
//#include <omp.h>
#include <limits.h>
/* Double approximation of pi, used by the Box-Muller transform in randn(). */
#define PI 3.1415926535897932
/* Parameters of the Linear Congruential Generator used by randu()/randn().
 * A and C are GCC/glibc rand() constants; M (= INT_MAX) is both the modulus
 * and the divisor that maps the state to [0, 1). */
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
/*****************************
 * GET_TIME
 * Returns the current wall-clock time in microseconds since the Unix epoch.
 * The seconds field is widened to long long BEFORE the multiply: on
 * platforms with a 32-bit time_t the original `tv.tv_sec * 1000000`
 * overflowed for any modern date.
 *****************************/
long long get_time() {
	struct timeval tv;
	gettimeofday(&tv, NULL);
	return ((long long) tv.tv_sec * 1000000LL) + tv.tv_usec;
}
// Convert a pair of microsecond timestamps into the elapsed time in seconds.
float elapsed_time(long long start_time, long long end_time) {
	long long diff_us = end_time - start_time;
	return ((float) diff_us) / 1000000.0f;
}
/**
 * Round a double to the nearest integer value (returned as a double).
 * Fractional parts below .5 round toward zero's side of the truncation,
 * .5 and above round up by one.
 *
 * Bug fix: the original `return newValue++` used a post-increment whose
 * effect was discarded, so the "round up" branch actually returned the
 * truncated value.  Use `newValue + 1` instead.
 *
 * @param value the number to round
 * @return the rounded value
 */
double roundDouble(double value){
	int newValue = (int)(value);
	if(value - newValue < .5)
		return newValue;
	else
		return newValue + 1;
}
/**
 * Replace every occurrence of testValue in the 3D array with newValue.
 * The volume is stored flattened as [x * dimY * dimZ + y * dimZ + z], so a
 * single pass over all dimX*dimY*dimZ cells visits exactly the same elements
 * as the triple loop.
 * @param testValue The value to be replaced
 * @param newValue  The value to replace testValue with
 * @param array3D   The image vector
 * @param dimX      The x dimension of the frame
 * @param dimY      The y dimension of the frame
 * @param dimZ      The number of frames
 */
void setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){
	int total = (*dimX) * (*dimY) * (*dimZ);
	for (int idx = 0; idx < total; idx++) {
		if (array3D[idx] == testValue)
			array3D[idx] = newValue;
	}
}
/**
 * Generates a uniformly distributed random number using the provided seed and
 * GCC's settings for the Linear Congruential Generator (LCG).
 * @see http://en.wikipedia.org/wiki/Linear_congruential_generator
 * @note This function is thread-safe (each index owns its own state)
 * @param seed  The seed array
 * @param index The specific index of the seed to be advanced
 * @return a uniformly distributed number [0, 1)
 */
double randu(int * seed, int index)
{
	// Advance the LCG state; the int multiply may wrap, exactly as in the
	// original implementation (the wrapped value feeds the modulus below).
	int next = A * seed[index] + C;
	seed[index] = next % M;
	double u = seed[index] / ((double) M);
	return fabs(u);
}
#ifdef USE_RISCV_VECTOR
/**
 * Vector variant of randu(): advances gvl consecutive LCG states starting at
 * seed[index] and returns the corresponding uniform deviates as one vector.
 * NOTE(review): the values are currently computed scalarly into fixed stack
 * buffers and then loaded into a vector register; the pure-vector version is
 * kept below as commented-out code.  Assumes gvl <= 256 (size of the stack
 * buffers) -- TODO confirm against all callers.
 */
inline _MMR_f64 randu_vector(long int * seed, int index ,unsigned long int gvl)
{
/*
_MMR_i64 xseed = _MM_LOAD_i64(&seed[index],gvl);
_MMR_i64 xA = _MM_SET_i64(A,gvl);
_MMR_i64 xC = _MM_SET_i64(C,gvl);
_MMR_i64 xM = _MM_SET_i64(M,gvl);
xseed = _MM_MUL_i64(xseed,xA,gvl);
xseed = _MM_ADD_i64(xseed,xC,gvl);
_MM_STORE_i64(&seed[index],_MM_REM_i64(xseed,xM,gvl),gvl);
FENCE();
_MMR_f64 xResult;
xResult = _MM_DIV_f64(_MM_VFCVT_F_X_f64(xseed,gvl),_MM_VFCVT_F_X_f64(xM,gvl),gvl);
xResult = _MM_VFSGNJX_f64(xResult,xResult,gvl);
return xResult;
*/
/*
This part of the code should work in 32 bits, but the conversion
instructions are not available yet.  Moving everything to 64 bits changes
the result, because the arithmetic no longer overflows, and the deviations
are large.
*/
double result[256];
int num[256];
//FENCE();
//double* result = (double*)malloc(gvl*sizeof(double));
//int* num = (int*)malloc(gvl*sizeof(int));
FENCE();
// Scalar LCG update per lane; mirrors randu() exactly (including int wrap).
for(int x = index; x < index+gvl; x++){
num[x-index] = A*seed[x] + C;
seed[x] = num[x-index] % M;
result[x-index] = fabs(seed[x]/((double) M));
}
_MMR_f64 xResult;
xResult = _MM_LOAD_f64(&result[0],gvl);
FENCE();
return xResult;
}
#endif // USE_RISCV_VECTOR
/**
 * Generates a normally distributed random number using the Box-Muller
 * transformation.
 * @note This function is thread-safe (state is per seed index)
 * @param seed  The seed array
 * @param index The specific index of the seed to be advanced
 * @return a double drawn from a standard normal distribution
 * @see http://en.wikipedia.org/wiki/Normal_distribution, section computing
 *      value for normal random distribution
 */
double randn(int * seed, int index){
	/* Box-Muller: two uniform draws -> one normal deviate. */
	double u1 = randu(seed, index);
	double u2 = randu(seed, index);
	double radius = sqrt(-2 * log(u1));
	return radius * cos(2 * PI * u2);
}
#ifdef USE_RISCV_VECTOR
/**
 * Vector variant of randn(): Box-Muller transform over gvl lanes.
 * Consumes two vector uniform draws (via randu_vector) per call, advancing
 * gvl LCG states twice.
 */
inline _MMR_f64 randn_vector(long int * seed, int index ,unsigned long int gvl){
/*Box-Muller algorithm*/
_MMR_f64 xU = randu_vector(seed,index,gvl);
_MMR_f64 xV = randu_vector(seed,index,gvl);
_MMR_f64 xCosine;
_MMR_f64 xRt;
xV = _MM_MUL_f64(_MM_SET_f64(PI*2.0,gvl),xV,gvl);
xCosine =_MM_COS_f64(xV,gvl);
FENCE();
xU = _MM_LOG_f64(xU,gvl);
// rt = -2 * log(u); result = sqrt(rt) * cos(2*pi*v)
xRt = _MM_MUL_f64(_MM_SET_f64(-2.0,gvl),xU,gvl);
return _MM_MUL_f64(_MM_SQRT_f64(xRt,gvl),xCosine,gvl);
}
#endif // USE_RISCV_VECTOR
/**
 * Add zero-mean Gaussian noise (scaled by 5) to every cell of the 3D volume.
 * The volume is flattened as [x * dimY * dimZ + y * dimZ + z], so a single
 * linear pass visits the cells in the same order as the original triple loop
 * (important: randn() advances seed[0] once per cell, in that order).
 * @param array3D The video to be modified
 * @param dimX    The x dimension of the frame
 * @param dimY    The y dimension of the frame
 * @param dimZ    The number of frames
 * @param seed    The seed array
 */
void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){
	int total = (*dimX) * (*dimY) * (*dimZ);
	for (int idx = 0; idx < total; idx++) {
		array3D[idx] += (int)(5 * randn(seed, 0));
	}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius)
{
int diameter = radius*2 - 1;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2));
if(distance < radius)
disk[x*diameter + y] = 1;
}
}
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error)
{
int startX = posX - error;
while(startX < 0)
startX++;
int startY = posY - error;
while(startY < 0)
startY++;
int endX = posX + error;
while(endX > dimX)
endX--;
int endY = posY + error;
while(endY > dimY)
endY--;
int x,y;
for(x = startX; x < endX; x++){
for(y = startY; y < endY; y++){
double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) );
if(distance < error)
matrix[x*dimY*dimZ + y*dimZ + posZ] = 1;
}
}
}
/**
 * Dilate every set pixel of the reference video into the target video.
 * For each cell of matrix equal to 1, a disk of the given error radius is
 * stamped into newMatrix at the same (x,y,z) position.
 * @param matrix    The reference matrix
 * @param dimX      The x dimension of the video
 * @param dimY      The y dimension of the video
 * @param dimZ      The z dimension of the video
 * @param error     The error radius to be dilated
 * @param newMatrix The target matrix
 */
void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix)
{
	for (int z = 0; z < dimZ; z++) {
		for (int x = 0; x < dimX; x++) {
			for (int y = 0; y < dimY; y++) {
				if (matrix[x*dimY*dimZ + y*dimZ + z] != 1)
					continue;
				dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
			}
		}
	}
}
/**
 * Fill the neighbors array with the (y,x) offsets, relative to the disk
 * center, of every set cell in the structuring element, in row-major scan
 * order.  Offsets are stored as pairs: neighbors[2k] = column offset,
 * neighbors[2k+1] = row offset.
 * @param se        The disk object
 * @param numOnes   The number of ones in the disk (unused; kept for API compatibility)
 * @param neighbors The array that will contain the offsets
 * @param radius    The radius used for dilation
 */
void getneighbors(int * se, int numOnes, double * neighbors, int radius){
	int diameter = radius*2 - 1;
	int center = radius - 1;
	int pairCount = 0;
	for (int row = 0; row < diameter; row++) {
		for (int col = 0; col < diameter; col++) {
			if (!se[row*diameter + col])
				continue;
			neighbors[pairCount*2]     = (double)(col - center);
			neighbors[pairCount*2 + 1] = (double)(row - center);
			pairCount++;
		}
	}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the background intensity is known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){
int k;
int max_size = IszX*IszY*Nfr;
/*get object centers*/
// NOTE(review): x0 is derived from IszY and y0 from IszX, and the index
// below strides x0 by IszY*Nfr while other helpers (setIf/addNoise) use an
// IszX-major layout -- the axis ordering looks swapped; confirm intended.
int x0 = (int)roundDouble(IszY/2.0);
int y0 = (int)roundDouble(IszX/2.0);
I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1;
/*move point*/
// Linear motion: +1 per frame in one axis, -2 per frame in the other;
// positions that leave the volume wrap to index 0.
int xk, yk, pos;
for(k = 1; k < Nfr; k++){
xk = abs(x0 + (k-1));
yk = abs(y0 - 2*(k-1));
pos = yk * IszY * Nfr + xk *Nfr + k;
if(pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
// Grow each seeded point into a disk of radius 5, then copy back into I.
int * newMatrix = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for(x = 0; x < IszX; x++){
for(y = 0; y < IszY; y++){
for(k = 0; k < Nfr; k++){
I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
// Background cells (0) become intensity 100, object cells (1) become 228.
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
 * Determines the likelihood sum based on the formula: SUM( ((IK[IND] - 100)^2 - (IK[IND] - 228)^2) / 50 )
* @param I The 3D matrix
* @param ind The current ind array
* @param numOnes The length of ind array
* @return A double representing the sum
*/
double calcLikelihoodSum(int * I, int * ind, int numOnes){
double likelihoodSum = 0.0;
int y;
for(y = 0; y < numOnes; y++)
likelihoodSum += (pow((I[ind[y]] - 100),2) - pow((I[ind[y]]-228),2))/50.0;
return likelihoodSum;
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
int findIndex(double * CDF, int lengthCDF, double value){
	/* Linear scan for the first CDF entry >= value; falls back to the last
	 * index when no entry qualifies. */
	int i;
	for(i = 0; i < lengthCDF; i++){
		if(CDF[i] >= value){
			return i;
		}
	}
	return lengthCDF - 1;
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses binary search before switching to sequential search
* @param CDF The CDF
* @param beginIndex The index to start searching from
* @param endIndex The index to stop searching
* @param value The value to find
* @return The index of value in the CDF; if value is never found, returns the last index
* @warning Use at your own risk; not fully tested
*/
int findIndexBin(double * CDF, int beginIndex, int endIndex, double value){
	/* Binary-search version of findIndex: returns the first index in
	 * [beginIndex, endIndex] whose CDF entry is >= value, or endIndex (the
	 * last index) when every entry is below value — matching findIndex's
	 * contract. Returns -1 for an empty range (endIndex < beginIndex).
	 *
	 * Fix: the original recursion searched the wrong halves — it recursed on
	 * (beginIndex, middleIndex+1) when moving LEFT and (middleIndex-1,
	 * endIndex) when moving RIGHT, which could recurse forever on a
	 * non-shrinking range and returned wrong indices. Rewritten as the
	 * standard iterative lower-bound search. */
	if(endIndex < beginIndex)
		return -1;
	int low = beginIndex;
	int high = endIndex;
	while(low < high){
		int middle = low + (high - low)/2;	/* overflow-safe midpoint */
		if(CDF[middle] >= value)
			high = middle;		/* middle qualifies; answer is middle or left of it */
		else
			low = middle + 1;	/* everything up to middle is below value */
	}
	/* low == high: first qualifying index, or endIndex when none qualifies. */
	return low;
}
/**
* The implementation of the particle filter using OpenMP for many frames
* @see http://openmp.org/wp/
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(int * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles){
	/* Scalar particle filter: for each frame, particles are propagated with a
	 * random-walk motion model, scored against the image via a likelihood
	 * ratio over a disk of sample offsets, reweighted/normalized, and then
	 * resampled through the weight CDF. Per-stage timings and the estimated
	 * object location are printed to stdout each frame. */
	int max_size = IszX*IszY*Nfr;
	long long start = get_time();
	//original particle centroid
	double xe = roundDouble(IszY/2.0);
	double ye = roundDouble(IszX/2.0);
	//expected object locations, compared to center
	int radius = 5;
	int diameter = radius*2 - 1;
	int * disk = (int *)malloc(diameter*diameter*sizeof(int));
	strelDisk(disk, radius);
	/* count the set cells of the disk so objxy and ind can be sized exactly */
	int countOnes = 0;
	int x, y;
	for(x = 0; x < diameter; x++){
		for(y = 0; y < diameter; y++){
			if(disk[x*diameter + y] == 1)
				countOnes++;
		}
	}
	//printf("countOnes = %d \n",countOnes); // 69
	double * objxy = (double *)malloc(countOnes*2*sizeof(double));
	getneighbors(disk, countOnes, objxy, radius);
	long long get_neighbors = get_time();
	printf("TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors));
	//initial weights are all equal (1/Nparticles)
	double * weights = (double *)malloc(sizeof(double)*Nparticles);
	//#pragma omp parallel for shared(weights, Nparticles) private(x)
	for(x = 0; x < Nparticles; x++){
		weights[x] = 1/((double)(Nparticles));
	}
	long long get_weights = get_time();
	printf("TIME TO GET WEIGHTSTOOK: %f\n", elapsed_time(get_neighbors, get_weights));
	//initial likelihood to 0.0
	double * likelihood = (double *)malloc(sizeof(double)*Nparticles);
	double * arrayX = (double *)malloc(sizeof(double)*Nparticles);
	double * arrayY = (double *)malloc(sizeof(double)*Nparticles);
	double * xj = (double *)malloc(sizeof(double)*Nparticles);
	double * yj = (double *)malloc(sizeof(double)*Nparticles);
	double * CDF = (double *)malloc(sizeof(double)*Nparticles);
	double * u = (double *)malloc(sizeof(double)*Nparticles);
	int * ind = (int*)malloc(sizeof(int)*countOnes*Nparticles);
	//#pragma omp parallel for shared(arrayX, arrayY, xe, ye) private(x)
	/* every particle starts at the (rounded) image center */
	for(x = 0; x < Nparticles; x++){
		arrayX[x] = xe;
		arrayY[x] = ye;
	}
	int k;
	printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, get_time()));
	int indX, indY;
	for(k = 1; k < Nfr; k++){
		long long set_arrays = get_time();
		//apply motion model
		//draws sample from motion model (random walk). The only prior information
		//is that the object moves 2x as fast as in the y direction
		//#pragma omp parallel for shared(arrayX, arrayY, Nparticles, seed) private(x)
		for(x = 0; x < Nparticles; x++){
			arrayX[x] += 1 + 5*randn(seed, x);
			arrayY[x] += -2 + 2*randn(seed, x);
		}
		long long error = get_time();
		printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error));
		//particle filter likelihood
		//#pragma omp parallel for shared(likelihood, I, arrayX, arrayY, objxy, ind) private(x, y, indX, indY)
		for(x = 0; x < Nparticles; x++){
			//compute the likelihood: remember our assumption is that you know
			// foreground and the background image intensity distribution.
			// Notice that we consider here a likelihood ratio, instead of
			// p(z|x). It is possible in this case. why? a hometask for you.
			//calc ind
			for(y = 0; y < countOnes; y++){
				indX = roundDouble(arrayX[x]) + objxy[y*2 + 1];
				indY = roundDouble(arrayY[x]) + objxy[y*2];
				/* out-of-range disk samples are redirected to cell 0 */
				ind[x*countOnes + y] = fabs(indX*IszY*Nfr + indY*Nfr + k);
				if(ind[x*countOnes + y] >= max_size)
					ind[x*countOnes + y] = 0;
			}
			likelihood[x] = 0;
			for(y = 0; y < countOnes; y++)
				likelihood[x] += (pow((I[ind[x*countOnes + y]] - 100),2) - pow((I[ind[x*countOnes + y]]-228),2))/50.0;
			likelihood[x] = likelihood[x]/((double) countOnes);
		}
		long long likelihood_time = get_time();
		printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time));
		// update & normalize weights
		// using equation (63) of Arulampalam Tutorial
		//#pragma omp parallel for shared(Nparticles, weights, likelihood) private(x)
		for(x = 0; x < Nparticles; x++){
			weights[x] = weights[x] * exp(likelihood[x]);
		}
		long long exponential = get_time();
		printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential));
		double sumWeights = 0;
		//#pragma omp parallel for private(x) reduction(+:sumWeights)
		for(x = 0; x < Nparticles; x++){
			sumWeights += weights[x];
		}
		long long sum_time = get_time();
		printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time));
		//#pragma omp parallel for shared(sumWeights, weights) private(x)
		for(x = 0; x < Nparticles; x++){
			weights[x] = weights[x]/sumWeights;
		}
		long long normalize = get_time();
		printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize));
		xe = 0;
		ye = 0;
		// estimate the object location by expected values
		//#pragma omp parallel for private(x) reduction(+:xe, ye)
		for(x = 0; x < Nparticles; x++){
			xe += arrayX[x] * weights[x];
			ye += arrayY[x] * weights[x];
		}
		long long move_time = get_time();
		printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time));
		printf("XE: %lf\n", xe);
		printf("YE: %lf\n", ye);
		double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) );
		printf("%lf\n", distance);
		//display(hold off for now)
		//pause(hold off for now)
		//resampling
		/* build the cumulative distribution of the normalized weights ... */
		CDF[0] = weights[0];
		for(x = 1; x < Nparticles; x++){
			CDF[x] = weights[x] + CDF[x-1];
		}
		long long cum_sum = get_time();
		printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum));
		/* ... and draw Nparticles evenly spaced thresholds with one shared
		 * random offset (systematic resampling) */
		double u1 = (1/((double)(Nparticles)))*randu(seed, 0);
		//#pragma omp parallel for shared(u, u1, Nparticles) private(x)
		for(x = 0; x < Nparticles; x++){
			u[x] = u1 + x/((double)(Nparticles));
		}
		long long u_time = get_time();
		printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time));
		int j, i;
		//#pragma omp parallel for shared(CDF, Nparticles, xj, yj, u, arrayX, arrayY) private(i, j)
		for(j = 0; j < Nparticles; j++){
			i = findIndex(CDF, Nparticles, u[j]);
			/* defensive: findIndex returns lengthCDF-1 when nothing matches,
			 * so this branch appears unreachable */
			if(i == -1)
				i = Nparticles-1;
			//printf("%d ", i);
			xj[j] = arrayX[i];
			yj[j] = arrayY[i];
		}
		//printf("\n");
		long long xyj_time = get_time();
		printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time));
		//#pragma omp parallel for shared(weights, Nparticles) private(x)
		for(x = 0; x < Nparticles; x++){
			//reassign arrayX and arrayY
			arrayX[x] = xj[x];
			arrayY[x] = yj[x];
			weights[x] = 1/((double)(Nparticles));
		}
		long long reset = get_time();
		printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset));
	}
	free(disk);
	free(objxy);
	free(weights);
	free(likelihood);
	free(xj);
	free(yj);
	free(arrayX);
	free(arrayY);
	free(CDF);
	free(u);
	free(ind);
}
#ifdef USE_RISCV_VECTOR
void particleFilter_vector(int * I, int IszX, int IszY, int Nfr, int * seed, long int * seed_64, int Nparticles){
	/* EPI/RISC-V vector variant of particleFilter: the weight initialization,
	 * motion model, and resampling-index search are vectorized with EPI
	 * intrinsics; the remaining stages mirror the scalar version.
	 *
	 * Fix: the per-frame `locations` buffer was malloc'd inside the frame loop
	 * but never freed, leaking Nparticles*sizeof(long int) bytes per frame;
	 * it is now released once its indices have been consumed. */
	int max_size = IszX*IszY*Nfr;
	long long start = get_time();
	//original particle centroid
	double xe = roundDouble(IszY/2.0);
	double ye = roundDouble(IszX/2.0);
	//expected object locations, compared to center
	int radius = 5;
	int diameter = radius*2 - 1;
	int * disk = (int *)malloc(diameter*diameter*sizeof(int));
	strelDisk(disk, radius);
	int countOnes = 0;
	int x, y;
	for(x = 0; x < diameter; x++){
		for(y = 0; y < diameter; y++){
			if(disk[x*diameter + y] == 1)
				countOnes++;
		}
	}
	//printf("countOnes = %d \n",countOnes); // 69
	double * objxy = (double *)malloc(countOnes*2*sizeof(double));
	getneighbors(disk, countOnes, objxy, radius);
	long long get_neighbors = get_time();
	printf("TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors));
	//initial weights are all equal (1/Nparticles)
	double * weights = (double *)malloc(sizeof(double)*Nparticles);
	//#pragma omp parallel for shared(weights, Nparticles) private(x)
	/*
	for(x = 0; x < Nparticles; x++){
		weights[x] = 1/((double)(Nparticles));
	}*/
	/* vectorized broadcast of the uniform initial weight */
	unsigned long int gvl = __builtin_epi_vsetvl(Nparticles, __epi_e64, __epi_m1);
	_MMR_f64 xweights = _MM_SET_f64(1.0/((double)(Nparticles)),gvl);
	for(x = 0; x < Nparticles; x=x+gvl){
		gvl = __builtin_epi_vsetvl(Nparticles-x, __epi_e64, __epi_m1);
		_MM_STORE_f64(&weights[x],xweights,gvl);
	}
	FENCE();
	long long get_weights = get_time();
	printf("TIME TO GET WEIGHTSTOOK: %f\n", elapsed_time(get_neighbors, get_weights));
	//initial likelihood to 0.0
	double * likelihood = (double *)malloc(sizeof(double)*Nparticles);
	double * arrayX = (double *)malloc(sizeof(double)*Nparticles);
	double * arrayY = (double *)malloc(sizeof(double)*Nparticles);
	double * xj = (double *)malloc(sizeof(double)*Nparticles);
	double * yj = (double *)malloc(sizeof(double)*Nparticles);
	double * CDF = (double *)malloc(sizeof(double)*Nparticles);
	double * u = (double *)malloc(sizeof(double)*Nparticles);
	int * ind = (int*)malloc(sizeof(int)*countOnes*Nparticles);
	/*
	//#pragma omp parallel for shared(arrayX, arrayY, xe, ye) private(x)
	for(x = 0; x < Nparticles; x++){
		arrayX[x] = xe;
		arrayY[x] = ye;
	}
	*/
	/* vectorized broadcast: every particle starts at the image center */
	gvl = __builtin_epi_vsetvl(Nparticles, __epi_e64, __epi_m1);
	_MMR_f64 xArrayX = _MM_SET_f64(xe,gvl);
	_MMR_f64 xArrayY = _MM_SET_f64(ye,gvl);
	for(int i = 0; i < Nparticles; i=i+gvl){
		gvl = __builtin_epi_vsetvl(Nparticles-i, __epi_e64, __epi_m1);
		_MM_STORE_f64(&arrayX[i],xArrayX,gvl);
		_MM_STORE_f64(&arrayY[i],xArrayY,gvl);
	}
	FENCE();
	_MMR_f64 xAux;
	int k;
	printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, get_time()));
	int indX, indY;
	for(k = 1; k < Nfr; k++){
		long long set_arrays = get_time();
		//apply motion model
		//draws sample from motion model (random walk). The only prior information
		//is that the object moves 2x as fast as in the y direction
		gvl = __builtin_epi_vsetvl(Nparticles, __epi_e64, __epi_m1);
		for(x = 0; x < Nparticles; x=x+gvl){
			gvl = __builtin_epi_vsetvl(Nparticles-x, __epi_e64, __epi_m1);
			xArrayX = _MM_LOAD_f64(&arrayX[x],gvl);
			FENCE();
			xAux = randn_vector(seed_64, x,gvl);
			FENCE();
			xAux = _MM_MUL_f64(xAux, _MM_SET_f64(5.0,gvl),gvl);
			xAux = _MM_ADD_f64(xAux, _MM_SET_f64(1.0,gvl),gvl);
			xArrayX = _MM_ADD_f64(xAux, xArrayX ,gvl);
			_MM_STORE_f64(&arrayX[x],xArrayX,gvl);
			xArrayY = _MM_LOAD_f64(&arrayY[x],gvl);
			FENCE();
			xAux = randn_vector(seed_64, x,gvl);
			FENCE();
			xAux = _MM_MUL_f64(xAux, _MM_SET_f64(2.0,gvl),gvl);
			xAux = _MM_ADD_f64(xAux, _MM_SET_f64(-2.0,gvl),gvl);
			xArrayY = _MM_ADD_f64(xAux, xArrayY ,gvl);
			_MM_STORE_f64(&arrayY[x],xArrayY,gvl);
		}
		FENCE();
		/*
		//#pragma omp parallel for shared(arrayX, arrayY, Nparticles, seed) private(x)
		for(x = 0; x < Nparticles; x++){
			arrayX[x] += 1 + 5*randn(seed, x);
			arrayY[x] += -2 + 2*randn(seed, x);
		}
		*/
		long long error = get_time();
		printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error));
		//particle filter likelihood
		//#pragma omp parallel for shared(likelihood, I, arrayX, arrayY, objxy, ind) private(x, y, indX, indY)
		for(x = 0; x < Nparticles; x++){
			//compute the likelihood: remember our assumption is that you know
			// foreground and the background image intensity distribution.
			// Notice that we consider here a likelihood ratio, instead of
			// p(z|x). It is possible in this case. why? a hometask for you.
			//calc ind
			for(y = 0; y < countOnes; y++){
				indX = roundDouble(arrayX[x]) + objxy[y*2 + 1];
				indY = roundDouble(arrayY[x]) + objxy[y*2];
				ind[x*countOnes + y] = fabs(indX*IszY*Nfr + indY*Nfr + k);
				if(ind[x*countOnes + y] >= max_size)
					ind[x*countOnes + y] = 0;
			}
			likelihood[x] = 0;
			for(y = 0; y < countOnes; y++)
				likelihood[x] += (pow((I[ind[x*countOnes + y]] - 100),2) - pow((I[ind[x*countOnes + y]]-228),2))/50.0;
			likelihood[x] = likelihood[x]/((double) countOnes);
		}
		long long likelihood_time = get_time();
		printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time));
		// update & normalize weights
		// using equation (63) of Arulampalam Tutorial
		//#pragma omp parallel for shared(Nparticles, weights, likelihood) private(x)
		for(x = 0; x < Nparticles; x++){
			weights[x] = weights[x] * exp(likelihood[x]);
		}
		long long exponential = get_time();
		printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential));
		double sumWeights = 0;
		//#pragma omp parallel for private(x) reduction(+:sumWeights)
		for(x = 0; x < Nparticles; x++){
			sumWeights += weights[x];
		}
		long long sum_time = get_time();
		printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time));
		//#pragma omp parallel for shared(sumWeights, weights) private(x)
		for(x = 0; x < Nparticles; x++){
			weights[x] = weights[x]/sumWeights;
		}
		long long normalize = get_time();
		printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize));
		xe = 0;
		ye = 0;
		// estimate the object location by expected values
		//#pragma omp parallel for private(x) reduction(+:xe, ye)
		for(x = 0; x < Nparticles; x++){
			xe += arrayX[x] * weights[x];
			ye += arrayY[x] * weights[x];
		}
		long long move_time = get_time();
		printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time));
		printf("XE: %lf\n", xe);
		printf("YE: %lf\n", ye);
		double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) );
		printf("%lf\n", distance);
		//display(hold off for now)
		//pause(hold off for now)
		//resampling
		CDF[0] = weights[0];
		for(x = 1; x < Nparticles; x++){
			CDF[x] = weights[x] + CDF[x-1];
		}
		long long cum_sum = get_time();
		printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum));
		double u1 = (1/((double)(Nparticles)))*randu(seed, 0);
		//#pragma omp parallel for shared(u, u1, Nparticles) private(x)
		for(x = 0; x < Nparticles; x++){
			u[x] = u1 + x/((double)(Nparticles));
		}
		long long u_time = get_time();
		printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time));
		int j, i;
		/* vectorized first-CDF-entry->=u search: for each lane, scan CDF[j]
		 * and record the first j whose CDF value reaches that lane's u,
		 * defaulting to Nparticles-1 (same contract as findIndex) */
		_MMR_MASK_i64 xComp;
		_MMR_i64 xMask;
		_MMR_f64 xCDF;
		_MMR_f64 xU;
		_MMR_i64 xArray;
		long int vector_complete;
		long int * locations = (long int *)malloc(sizeof(long int)*Nparticles);
		long int valid;
		gvl = __builtin_epi_vsetvl(Nparticles, __epi_e64, __epi_m1);
		for(i = 0; i < Nparticles; i=i+gvl){
			gvl = __builtin_epi_vsetvl(Nparticles-i, __epi_e64, __epi_m1);
			vector_complete = 0;
			xMask = _MM_SET_i64(0,gvl);
			xArray = _MM_SET_i64(Nparticles-1,gvl);
			xU = _MM_LOAD_f64(&u[i],gvl);
			for(j = 0; j < Nparticles; j++){
				xCDF = _MM_SET_f64(CDF[j],gvl);
				xComp = _MM_VFGE_f64(xCDF,xU,gvl);
				xComp = _MM_CAST_i1_i64(_MM_XOR_i64(_MM_CAST_i64_i1(xComp),xMask,gvl));
				valid = _MM_VMFIRST_i64(xComp,gvl);
				if(valid != -1)
				{
					xArray = _MM_MERGE_i64(xArray,_MM_SET_i64(j,gvl),xComp,gvl);
					xMask = _MM_OR_i64(_MM_CAST_i64_i1(xComp),xMask,gvl);
					vector_complete = _MM_VMPOPC_i64(_MM_CAST_i1_i64(xMask),gvl);
				}
				if(vector_complete == gvl){ break; }
				//FENCE();
			}
			_MM_STORE_i64(&locations[i],xArray,gvl);
		}
		FENCE();
		//for(i = 0; i < Nparticles; i++) { printf("%d ", locations[i]); } printf("\n");
		//#pragma omp parallel for shared(CDF, Nparticles, xj, yj, u, arrayX, arrayY) private(i, j)
		for(j = 0; j < Nparticles; j++){
			i = locations[j];
			xj[j] = arrayX[i];
			yj[j] = arrayY[i];
		}
		free(locations);	/* fix: was allocated every frame and never freed */
		// for(j = 0; j < Nparticles; j++){ printf("%lf ", xj[i]); } printf("\n");
		// for(j = 0; j < Nparticles; j++){ printf("%lf ", yj[i]); } printf("\n");
		long long xyj_time = get_time();
		printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time));
		//#pragma omp parallel for shared(weights, Nparticles) private(x)
		for(x = 0; x < Nparticles; x++){
			//reassign arrayX and arrayY
			arrayX[x] = xj[x];
			arrayY[x] = yj[x];
			weights[x] = 1/((double)(Nparticles));
		}
		long long reset = get_time();
		printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset));
	}
	free(disk);
	free(objxy);
	free(weights);
	free(likelihood);
	free(xj);
	free(yj);
	free(arrayX);
	free(arrayY);
	free(CDF);
	free(u);
	free(ind);
}
#endif
int main(int argc, char * argv[]){
	/* Entry point: parses "-x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>",
	 * generates the synthetic video, and runs the scalar (or, with
	 * USE_RISCV_VECTOR, the vectorized) particle filter, printing timings.
	 * Prints usage/error text and returns 0 when the arguments are rejected. */
	char* usage = "openmp.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
	//check number of arguments
	if(argc != 9)
	{
		printf("%s\n", usage);
		return 0;
	}
	//check args deliminators
	if( strcmp( argv[1], "-x" ) || strcmp( argv[3], "-y" ) || strcmp( argv[5], "-z" ) || strcmp( argv[7], "-np" ) ) {
		printf( "%s\n",usage );
		return 0;
	}
	int IszX, IszY, Nfr, Nparticles;
	/* Fix: sscanf returns the number of successful conversions (0 for a
	 * non-numeric token) and EOF only when input ends before any conversion.
	 * The old "== EOF" tests therefore accepted garbage like "-x foo" while
	 * leaving the variable uninitialized; require exactly one conversion. */
	//converting a string to a integer
	if( sscanf( argv[2], "%d", &IszX ) != 1 ) {
		printf("ERROR: dimX input is incorrect");
		return 0;
	}
	if( IszX <= 0 ) {
		printf("dimX must be > 0\n");
		return 0;
	}
	//converting a string to a integer
	if( sscanf( argv[4], "%d", &IszY ) != 1 ) {
		printf("ERROR: dimY input is incorrect");
		return 0;
	}
	if( IszY <= 0 ) {
		printf("dimY must be > 0\n");
		return 0;
	}
	//converting a string to a integer
	if( sscanf( argv[6], "%d", &Nfr ) != 1 ) {
		printf("ERROR: Number of frames input is incorrect");
		return 0;
	}
	if( Nfr <= 0 ) {
		printf("number of frames must be > 0\n");
		return 0;
	}
	//converting a string to a integer
	if( sscanf( argv[8], "%d", &Nparticles ) != 1 ) {
		printf("ERROR: Number of particles input is incorrect");
		return 0;
	}
	if( Nparticles <= 0 ) {
		printf("Number of particles must be > 0\n");
		return 0;
	}
	//establish seed
	int * seed = (int *)malloc(sizeof(int)*Nparticles);
	//malloc matrix
	int * I = (int *)malloc(sizeof(int)*IszX*IszY*Nfr); // 128 * 128 * 10 = 163840 * sizeof(int)
	if( seed == NULL || I == NULL ) {
		printf("ERROR: out of memory\n");
		free(seed);	/* free(NULL) is a no-op */
		free(I);
		return 0;
	}
	int i;
	for(i = 0; i < Nparticles; i++)
	{
		seed[i] = time(0)*i;	/* note: seed[0] is always 0 */
	}
	long long start = get_time();
	//call video sequence
	videoSequence(I, IszX, IszY, Nfr, seed);
	long long endVideoSequence = get_time();
	printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
	#ifdef USE_RISCV_VECTOR
	long int * seed_64 = (long int *)malloc(sizeof(long int)*Nparticles);
	for(i = 0; i < Nparticles; i++)
	{
		seed_64[i] = (long int)seed[i];
	}
	//call particle filter
	particleFilter_vector(I, IszX, IszY, Nfr, seed,seed_64, Nparticles);
	free(seed_64);	/* fix: previously leaked */
	#else
	//call particle filter
	particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
	#endif
	long long endParticleFilter = get_time();
	printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
	printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
	free(seed);
	free(I);
	return 0;
}
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>
#include <omp.h>
#define NDEBUG
#ifndef NDEBUG
#define DEBUG(cmd) cmd;
#else
#define DEBUG(cmd) ;
#endif
// Check if number is prime
/* Returns true when n is a prime number.
 * Fixes: the original reported 0, 1 and negative n as prime (the trial loop
 * never executed for n < 3), and tested every divisor up to n-1. Now n < 2 is
 * rejected explicitly and trial division stops at sqrt(n) (expressed as
 * i <= n/i to avoid i*i overflow). */
bool is_prime(int n)
{
    DEBUG(
        int thread_id = omp_get_thread_num();
        printf("[Thread %.2d] Check number %d\n", thread_id, n);
    )
    if (n < 2)
    {
        return false; // 0, 1 and negatives are not prime
    }
    for (int i = 2; i <= n / i; i++)
    {
        if (n % i == 0)
        {
            return false;
        }
    }
    return true;
}
int main(int argc, char** argv)
{
    // Counts the primes in [2, n] in parallel and prints the total.
    int n = 10; // default number of elements in range
    if (argc == 2)
    {
        n = atoi(argv[1]); // read n from cmd input
    }
    int primes_len = 0; // result
    // Each thread accumulates a private count; the reduction sums them.
#pragma omp parallel for schedule(static) reduction(+:primes_len)
    for (int candidate = 2; candidate <= n; candidate++)
    {
        primes_len += is_prime(candidate) ? 1 : 0;
    }
    printf("The number of primes in range 1 ... %d: %d\n", n, primes_len);
    return 0;
}
|
test_vectorize.c | #include "Python.h"
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER)
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
/* Auto-generated (Cython array-expression) tiled elementwise kernel computing
 * op1[i,j] = op2[i,j] + op3[i,j] + op4[i,j] + op5[i,j] + op6[i,j] over a 2-D
 * shape, processed in blocksize x blocksize tiles; the outer tile loop is
 * OpenMP-parallel when the total element count exceeds omp_size. Strides are
 * in bytes, hence the char* arithmetic on each access. */
static int __mini_mangle___pyx_array_expression17tiled_c(Py_ssize_t const *const CYTHON_RESTRICT __mini_mangle___pyx_shape, double *const CYTHON_RESTRICT __pyx_op1_data, Py_ssize_t const *const CYTHON_RESTRICT __pyx_op1_strides, double const *const CYTHON_RESTRICT __pyx_op2_data, Py_ssize_t const *const CYTHON_RESTRICT __pyx_op2_strides, double const *const CYTHON_RESTRICT __pyx_op3_data, Py_ssize_t const *const CYTHON_RESTRICT __pyx_op3_strides, double const *const CYTHON_RESTRICT __pyx_op4_data, Py_ssize_t const *const CYTHON_RESTRICT __pyx_op4_strides, double const *const CYTHON_RESTRICT __pyx_op5_data, Py_ssize_t const *const CYTHON_RESTRICT __pyx_op5_strides, double const *const CYTHON_RESTRICT __pyx_op6_data, Py_ssize_t const *const CYTHON_RESTRICT __pyx_op6_strides, Py_ssize_t const __mini_mangle_blocksize, Py_ssize_t const __mini_mangle_omp_size) {
  Py_ssize_t __mini_mangle_temp0;
  Py_ssize_t __mini_mangle_temp1;
  /* temp0 = total element count; gates the OpenMP parallelization below */
  __mini_mangle_temp0 = ((__mini_mangle___pyx_shape[0]) * (__mini_mangle___pyx_shape[1]));
#ifdef _OPENMP
#pragma omp parallel for if((__mini_mangle_temp0 > __mini_mangle_omp_size))
#endif
  for (__mini_mangle_temp1 = 0; __mini_mangle_temp1 < (__mini_mangle___pyx_shape[0]); __mini_mangle_temp1 = (__mini_mangle_temp1 + __mini_mangle_blocksize)) {
    Py_ssize_t __mini_mangle_temp2;
    for (__mini_mangle_temp2 = 0; __mini_mangle_temp2 < (__mini_mangle___pyx_shape[1]); __mini_mangle_temp2 = (__mini_mangle_temp2 + __mini_mangle_blocksize)) {
      Py_ssize_t __mini_mangle_temp3;
      Py_ssize_t __mini_mangle_temp4;
      Py_ssize_t __mini_mangle_temp5;
      /* NOTE(review): temp1 iterates up to shape[0] but its tile end (temp3)
       * is clamped against shape[1], and vice versa for temp2/temp4 — the
       * clamps look swapped for non-square shapes. Generated code; verify
       * against the Cython emitter before changing. */
      __mini_mangle_temp3 = (((__mini_mangle_temp1 + __mini_mangle_blocksize) < (__mini_mangle___pyx_shape[1])) ? (__mini_mangle_temp1 + __mini_mangle_blocksize) : (__mini_mangle___pyx_shape[1]));
      __mini_mangle_temp4 = (((__mini_mangle_temp2 + __mini_mangle_blocksize) < (__mini_mangle___pyx_shape[0])) ? (__mini_mangle_temp2 + __mini_mangle_blocksize) : (__mini_mangle___pyx_shape[0]));
      /* temp5 walks the rows of the current tile (dimension-0 stride) */
      for (__mini_mangle_temp5 = __mini_mangle_temp2; __mini_mangle_temp5 < __mini_mangle_temp4; __mini_mangle_temp5++) {
        double *CYTHON_RESTRICT __mini_mangle_temp6;
        double const *CYTHON_RESTRICT __mini_mangle_temp7;
        double const *CYTHON_RESTRICT __mini_mangle_temp8;
        double const *CYTHON_RESTRICT __mini_mangle_temp9;
        double const *CYTHON_RESTRICT __mini_mangle_temp10;
        double const *CYTHON_RESTRICT __mini_mangle_temp11;
        Py_ssize_t __mini_mangle_temp12;
        /* row base pointers for the output (temp6) and the five inputs */
        __mini_mangle_temp6 = ((double *) (((char *) __pyx_op1_data) + (__mini_mangle_temp5 * (__pyx_op1_strides[0]))));
        __mini_mangle_temp7 = ((double *) (((char *) __pyx_op2_data) + (__mini_mangle_temp5 * (__pyx_op2_strides[0]))));
        __mini_mangle_temp8 = ((double *) (((char *) __pyx_op3_data) + (__mini_mangle_temp5 * (__pyx_op3_strides[0]))));
        __mini_mangle_temp9 = ((double *) (((char *) __pyx_op4_data) + (__mini_mangle_temp5 * (__pyx_op4_strides[0]))));
        __mini_mangle_temp10 = ((double *) (((char *) __pyx_op5_data) + (__mini_mangle_temp5 * (__pyx_op5_strides[0]))));
        __mini_mangle_temp11 = ((double *) (((char *) __pyx_op6_data) + (__mini_mangle_temp5 * (__pyx_op6_strides[0]))));
#ifdef __INTEL_COMPILER
#pragma simd
#endif
        /* innermost loop over the tile's columns: five-way elementwise sum */
        for (__mini_mangle_temp12 = __mini_mangle_temp1; __mini_mangle_temp12 < __mini_mangle_temp3; __mini_mangle_temp12++) {
          (*((double *CYTHON_RESTRICT) (((char *) __mini_mangle_temp6) + (__mini_mangle_temp12 * (__pyx_op1_strides[1]))))) = (((((*((double const *CYTHON_RESTRICT) (((char *) __mini_mangle_temp7) + (__mini_mangle_temp12 * (__pyx_op2_strides[1]))))) + (*((double const *CYTHON_RESTRICT) (((char *) __mini_mangle_temp8) + (__mini_mangle_temp12 * (__pyx_op3_strides[1])))))) + (*((double const *CYTHON_RESTRICT) (((char *) __mini_mangle_temp9) + (__mini_mangle_temp12 * (__pyx_op4_strides[1])))))) + (*((double const *CYTHON_RESTRICT) (((char *) __mini_mangle_temp10) + (__mini_mangle_temp12 * (__pyx_op5_strides[1])))))) + (*((double const *CYTHON_RESTRICT) (((char *) __mini_mangle_temp11) + (__mini_mangle_temp12 * (__pyx_op6_strides[1]))))));
        }
      }
    }
  }
  return 0;
}
/* Registration table of generated kernel entry points.
 * NOTE(review): storing a function designator in a void* array is not
 * strictly conforming C (object pointers and function pointers are distinct);
 * common compilers accept it in this generated code. */
void *list[] = {
__mini_mangle___pyx_array_expression17tiled_c,
};
|
MacroLegalizer.h | /**
* @file MacroLegalizer.h
* @author Tingyuan LIANG (tliang@connect.ust.hk)
* @brief This header file contains the definitions of MacroLegalizer class and its internal modules and APIs which
* map DSP/BRAM/CARRY macros to legal location
* @version 0.1
* @date 2021-10-02
*
* @copyright Copyright (c) 2021 Reconfiguration Computing Systems Lab, The Hong Kong University of Science and
* Technology. All rights reserved.
*
*/
#ifndef _MACROLEGALIZER
#define _MACROLEGALIZER
#include "DesignInfo.h"
#include "DeviceInfo.h"
#include "MinCostBipartiteMatcher.h"
#include "PlacementInfo.h"
#include "dumpZip.h"
#include "sysInfo.h"
#include <assert.h>
#include <fstream>
#include <iostream>
#include <map>
#include <mutex>
#include <omp.h>
#include <semaphore.h>
#include <set>
#include <sstream>
#include <string>
#include <thread>
#include <vector>
/**
* @brief MacroLegalizer maps DSP/BRAM/CARRY macros to legal location.
*
* (RAMB36E2 will be treated as 1+1 RAMB18E2, one of which is a virtual cell)
*
* The legalization procedure will only conduct rough legalization in the early iterations of global placement, and it
* will conduct exact legalization following rough legalization when the macros are close enough to their potential
* legal positions.
*/
class MacroLegalizer
{
public:
/**
* @brief Construct a new MacroLegalizer object
*
* @param legalizerName the name string of legalizer for log dumping
* @param placementInfo the PlacementInfo for this placer to handle
* @param deviceInfo device information
* @param macroTypesToLegalize a vector of Cell Type string indicating the target types handled by this
* MacroLegalizer
* @param JSONCfg the user-defined placement configuration
*/
MacroLegalizer(std::string legalizerName, PlacementInfo *placementInfo, DeviceInfo *deviceInfo,
std::vector<DesignInfo::DesignCellType> ¯oTypesToLegalize,
std::map<std::string, std::string> &JSONCfg);
~MacroLegalizer()
{
if (minCostBipartiteMatcher)
delete minCostBipartiteMatcher;
}
/**
* @brief conduct legalization and map the PlacementUnit of one of the given types to sites
*
* @param exactLegalization true to ensure elements in a macro are consecutive
* @param directLegalization direct legalize the macros without rough legalization phase
*/
void legalize(bool exactLegalization = false, bool directLegalization = false);
/**
* @brief Get the average displacement of exact legalization for the involved PlacementUnit
*
* Exact legalization ensures elements in a macro are placed consecutively.
*
* @return float
*/
inline float getAverageDisplacementOfExactLegalization()
{
if (finalAverageDisplacement > 1000)
return finalAverageDisplacement;
float tmpAverageDisplacement = 0.0;
for (auto PUSitePair : PULevelMatching)
{
tmpAverageDisplacement += std::fabs(PUSitePair.first->X() - PUSitePair.second->X()) +
std::fabs(PUSitePair.first->Y() - PUSitePair.second->Y());
}
tmpAverageDisplacement /= PULevelMatching.size();
return tmpAverageDisplacement;
}
/**
* @brief Get the average displacement of rough legalization for the involved PlacementUnit
*
* Rough legalization does not guarantee that elements in a macro are placed consecutively.
*
* @return float
*/
inline float getAverageDisplacementOfRoughLegalization()
{
return roughAverageDisplacement;
}
void dumpMatching(bool fixedColumn = false, bool enforce = false);
/**
* @brief Set the intitial parameters of the legalizer
*
* @param displacementThr displacement threshold to detect potential legal sites
* @param candidateNum the maximum number of final candidate sites
* @param _candidateFactor we are allowed to detect a excessive number (>candidateNum) of initial candidates
*/
void setIntitialParameters(float displacementThr, int candidateNum, int _candidateFactor = -1)
{
initialDisplacementThreshold = displacementThr;
initialMaxNumCandidate = candidateNum;
if (_candidateFactor > 1)
{
candidateFactor = _candidateFactor;
}
}
/**
* @brief reset the mapped flag of the involved sites.
*
* A mapped site will not be binded to another PlacementUnit.
*
*/
void resetSitesMapped();
private:
std::string legalizerName;
PlacementInfo *placementInfo;
DeviceInfo *deviceInfo;
/**
* @brief compatiblePlacementTable describes the type mapping from design to device, where a cell can be placed
* (which BEL in which site)
*
*/
PlacementInfo::CompatiblePlacementTable *compatiblePlacementTable;
/**
* @brief a vector of Cell Type string indicating the target types handled by this MacroLegalizer
*
*/
std::vector<DesignInfo::DesignCellType> macroTypesToLegalize;
/**
* @brief a reference of the locations of cells (in cellId order)
*
*/
std::vector<PlacementInfo::Location> &cellLoc;
std::map<std::string, std::string> &JSONCfg;
/**
* @brief min-cost bipartite matching solver for the legalization
*
*/
MinCostBipartiteMatcher *minCostBipartiteMatcher = nullptr;
/**
* @brief a vector storing the Design cells which have NOT been legalized
*
*/
std::vector<DesignInfo::DesignCell *> macroCellsToLegalize;
/**
* @brief a vector storing the cells in macros which SHOULD be legalized
*
*/
std::vector<DesignInfo::DesignCell *> initialMacrosToLegalize;
/**
* @brief a set storing the macros which have NOT been legalized
*
*/
std::set<PlacementInfo::PlacementUnit *> macroUnitsToLegalizeSet;
/**
* @brief a map record the potential sites of different site types
*
*/
std::map<DesignInfo::DesignCellType, std::vector<DeviceInfo::DeviceSite *>> macroType2Sites;
/**
* @brief record the mapping from cells to the candidate sites which are NOT binded to other cells
*
* Please be aware that a cell might be binded of multiple sites.
*
*/
std::map<DesignInfo::DesignCell *, std::vector<DeviceInfo::DeviceSite *>> macro2Sites;
/**
* @brief a cache record the candidate sites within a given displacement threshold for each cell in the macros
*
*/
std::map<DesignInfo::DesignCell *, std::vector<DeviceInfo::DeviceSite *> *> macro2SitesInDisplacementThreshold;
/**
* @brief map sites to temperary indexes for bipartite matching
*
*/
std::map<DeviceInfo::DeviceSite *, int> rightSiteIds;
/**
* @brief a vector for the candidate sites for bipartite matching
*
*/
std::vector<DeviceInfo::DeviceSite *> siteList;
/**
* @brief the adjacent list of the bipartite graph
*
*/
std::vector<std::vector<std::pair<int, float>>> adjList;
/**
* @brief a set of cells in macros binded to corresponding DeviceSites
*
*/
std::set<DesignInfo::DesignCell *> matchedMacroCells;
/**
* @brief a set of DeviceSites binded to corresponding PlacementUnits
*
*/
std::set<DeviceInfo::DeviceSite *> matchedSites;
/**
* @brief record the binding between design standard cells and DeviceSites as a vector of pairs
*
*/
std::vector<std::pair<DesignInfo::DesignCell *, DeviceInfo::DeviceSite *>> cellLevelMatching;
/**
* @brief record the binding between PlacementUnits and DeviceSites as a vector of pairs
*
*/
std::vector<std::pair<PlacementInfo::PlacementUnit *, DeviceInfo::DeviceSite *>> PULevelMatching;
int DumpMacroLegalizationCnt = 0;
/**
* @brief displacement threshold to detect potential legal sites
*
*/
float displacementThreshold = 30;
/**
* @brief the maximum number of final candidate sites
*
*/
int maxNumCandidate = 30;
/**
* @brief the number of BRAM columns on the target device
*
*/
int BRAMColumnNum = -1;
/**
* @brief the number of DSP columns on the target device
*
*/
int DSPColumnNum = -1;
/**
* @brief the number of CARRY columns on the target device
*
*/
int CARRYColumnNum = -1;
/**
* @brief the number of BRAM rows on the target device
*
*/
int BRAMRowNum = -1;
/**
* @brief the number of DSP rows on the target device
*
*/
int DSPRowNum = -1;
/**
* @brief the number of CARRY rows on the target device
*
*/
int CARRYRowNum = -1;
/**
* @brief the floating-point X location of the BRAM columns on the device
*
*/
std::vector<float> BRAMColumnXs;
/**
* @brief the floating-point X location of the DSP columns on the device
*
*/
std::vector<float> DSPColumnXs;
/**
* @brief the floating-point X location of the CARRY columns on the device
*
*/
std::vector<float> CARRYColumnXs;
/**
* @brief record the sites in each column of BRAM
*
*/
std::vector<std::vector<DeviceInfo::DeviceSite *>> BRAMColumn2Sites;
/**
* @brief record the sites in each column of DSP
*
*/
std::vector<std::vector<DeviceInfo::DeviceSite *>> DSPColumn2Sites;
/**
* @brief record the sites in each column of CARRY
*
*/
std::vector<std::vector<DeviceInfo::DeviceSite *>> CARRYColumn2Sites;
/**
* @brief record the PlacementUnits in each column of BRAM Sites
*
*/
std::vector<std::deque<PlacementInfo::PlacementUnit *>> BRAMColumn2PUs;
/**
* @brief record the PlacementUnits in each column of DSP Sites
*
*/
std::vector<std::deque<PlacementInfo::PlacementUnit *>> DSPColumn2PUs;
/**
* @brief record the PlacementUnits in each column of CARRY
*
*/
std::vector<std::deque<PlacementInfo::PlacementUnit *>> CARRYColumn2PUs;
/**
* @brief record the number of cells (Macro contains multiple cells) in each column for BRAM
*
*/
std::vector<int> BRAMColumnUntilization;
/**
* @brief record the number of cells (Macro contains multiple cells) in each column for DSP
*
*/
std::vector<int> DSPColumnUntilization;
/**
* @brief record the number of cells (Macro contains multiple cells) in each column for CARRY
*
*/
std::vector<int> CARRYColumnUntilization;
/**
* @brief record the PlacementUnits in each column of BRAM site
*
*/
std::map<DesignInfo::DesignCell *, int> BRAMCell2Column;
/**
* @brief record the PlacementUnits in each column of DSP site
*
*/
std::map<DesignInfo::DesignCell *, int> DSPCell2Column;
/**
* @brief record the PlacementUnits in each column of CARRY site
*
*/
std::map<DesignInfo::DesignCell *, int> CARRYCell2Column;
std::map<PlacementInfo::PlacementUnit *, std::vector<DeviceInfo::DeviceSite *>> PU2LegalSites;
/**
* @brief record the mapping from PlacementUnits to exact DeviceSite location X
*
*/
std::map<PlacementInfo::PlacementUnit *, float> PU2X;
/**
* @brief record the mapping from PlacementUnits to exact DeviceSite location Y
*
*/
std::map<PlacementInfo::PlacementUnit *, float> PU2Y;
/**
* @brief record the exact site X (column id) of involved PlacementUnits
*
* unused currently and just for debugging
*
*/
std::map<PlacementInfo::PlacementUnit *, int> PU2SiteX;
/**
* @brief record the column id for the binded cells in involved PlacementUnits
*
* i.e., if a PlacementUnit is PlacementMacro, the cells in it might be allowed to bind to different columns during
* rough legalization.
*
*/
std::map<PlacementInfo::PlacementUnit *, std::vector<int>> PU2Columns;
/**
 * @brief the PlacementUnits which should be mapped to BRAM site
 *
 */
std::set<PlacementInfo::PlacementUnit *> BRAMPUs;
/**
 * @brief the PlacementUnits which should be mapped to DSP site
 *
 */
std::set<PlacementInfo::PlacementUnit *> DSPPUs;
/**
 * @brief the PlacementUnits which should be mapped to CARRY BEL
 *
 */
std::set<PlacementInfo::PlacementUnit *> CARRYPUs;
bool enableBRAMLegalization = false;
bool enableDSPLegalization = false;
bool enableCARRYLegalization = false;
bool verbose = false;
float y2xRatio = 1.0;
bool clockRegionAware = false;
/**
* @brief the average displacement of exact legalization for the involved PlacementUnit
*
*/
float finalAverageDisplacement = 10000.0;
/**
 * @brief the average displacement of fixed column (but not exactly consecutive) legalization for the involved
 * PlacementUnit
 *
 * During the fixed column legalization, cells in a macro will be constrained on one column.
 *
 */
float fixedColumnAverageDisplacement = 10000.0;
/**
* @brief the average displacement of rough legalization for the involved PlacementUnit
*
*/
float roughAverageDisplacement = 10000.0;
/**
* @brief displacement threshold to detect potential legal sites
*
*/
float initialDisplacementThreshold = 30;
/**
* @brief the maximum number of final candidate sites
*
*/
int initialMaxNumCandidate = 30;
/**
* @brief the number of the parallel multi-threading workers to handle the legalization problems
*
*/
int nJobs = 1;
/**
 * @brief we are allowed to detect an excessive number (>candidateNum) of initial candidates. candidateFactor is to
 * control the excessive ratio.
 *
 */
int candidateFactor = 5;
/**
* @brief get the PlacementMacro(s) which SHOULD be legalized
*
*/
void getMacrosToLegalize();
/**
* @brief find available sites for each specific macro type required by the constructor
*
*/
void findMacroType2AvailableSites();
/**
* @brief resolve the overflow columns during fixed column legalization by spreading "outliers" to neighbor columns
*
*/
void resolveOverflowColumns();
/**
* @brief find potential sites for each PlacementUnit
*
* @param fixedColumn true if we want to find potential sites for PlacementUnit in a given column
*/
void findPossibleLegalLocation(bool fixedColumn = false);
/**
* @brief map the macros to the columns according to the locations of the cells in it
*
* @param directLegalization direct legalize the macros without rough legalization phase
*/
void mapMacrosToColumns(bool directLegalization);
/**
* @brief find the closest column for a given location X
*
* @param curX given location X
* @param Xs the location X for the resource columns
* @return int
*/
int findCorrespondingColumn(float curX, std::vector<float> &Xs);
/**
* @brief Create a bipartite graph between PlacementUnit and potential DeviceSites
*
*/
void createBipartiteGraph();
/**
 * @brief conduct rough legalization.
 *
 * Rough legalization does not guarantee that elements in a macro are placed consecutively. During rough
 * legalization, each cell in a macro will be "legalized" individually as a general standard cell without the shape
 * constraints.
 *
 */
void roughlyLegalize();
/**
* @brief conduct fixed-column legalization as a step in exact legalization. During fixed-column legalization, cells
* in PlacementUnit (macro) can be only mapped to the same column.
*
* @param directLegalization direct legalize the macros without rough legalization phase
*/
void fixedColumnLegalize(bool directLegalization);
/**
* @brief update the locations of the legalization anchors for the PlacementUnits.
*
* This function might be called more than one time during implementation so we have to specify the type of
* legalization and whether we want to update the displacement value for the control of some optimizations.
*
* @param isRoughLegalization specify the type of legalization
* @param updateDisplacement whether we want to update the displacement value for the control of some optimizations
*/
void updatePUMatchingLocation(bool isRoughLegalization = true, bool updateDisplacement = true);
/**
* @brief finally dynamic programming to legalize the macros which have been mapped to the columns.
*
* This function will call DP function for each specific type of macros
*
*/
void finalLegalizeBasedOnDP();
/**
* @brief DP function for the legalization of a specific type of macros in the same column
*
* @param colNum total number of the column of the target type of PlacementUnit
* @param Column2Sites a vector record the sites in the columns
* @param Column2PUs a vector record the macros in the columns
* @return float
*/
float DPForMinHPWL(int colNum, std::vector<std::vector<DeviceInfo::DeviceSite *>> &Column2Sites,
std::vector<std::deque<PlacementInfo::PlacementUnit *>> &Column2PUs);
/**
* @brief record the matching in private list and update the list of cells which are not matched by the
* bi-partite matching
*
*/
void updateMatchingAndUnmatchedMacroCells();
/**
 * @brief spread PlacementUnits across columns to resolve resource overflow
 *
 * @param columnNum the number of columns
 * @param columnUntilization a vector recording the utilization usage of each column
 * @param column2Sites a vector recording device sites in each column
 * @param column2PUs a vector recording PlacementUnits in each column
 * @param cell2Column a map recording the column id for each PlacementUnit
 */
void spreadMacros(int columnNum, std::vector<int> &columnUntilization,
std::vector<std::vector<DeviceInfo::DeviceSite *>> &column2Sites,
std::vector<std::deque<PlacementInfo::PlacementUnit *>> &column2PUs,
std::map<DesignInfo::DesignCell *, int> &cell2Column, float budgeRatio = 1);
/**
* @brief find the column which contains the most of cells in a macro in a specific range of columns
*
* @param minId the begin column
* @param maxId the end column
* @param ids the column ids of the cells in the macro
* @return int
*/
int findIdMaxWithRecurence(int minId, int maxId, std::vector<int> &ids);
/**
* @brief Set the sites which are binded as mapped so they will not be mapped to other elements in the netlist
*
*/
void setSitesMapped();
/**
 * @brief find candidate sites for the cells left to be matched
 *
 * For each macro cell awaiting legalization, query the bin grid for neighbor sites within
 * displacementThreshold, collecting up to candidateFactor * maxNumCandidate of them.
 */
void findMacroCell2SitesInDistance()
{
    macro2SitesInDisplacementThreshold.clear();
    int macrosNum = macroCellsToLegalize.size();
    // pre-insert every key sequentially so the parallel loop below only assigns to
    // already-existing map entries and never mutates the map structure concurrently
    for (int i = 0; i < macrosNum; i++)
    {
        macro2SitesInDisplacementThreshold[macroCellsToLegalize[i]] = nullptr;
    }
#pragma omp parallel for
    for (int i = 0; i < macrosNum; i++)
    {
        DesignInfo::DesignCell *curCell = macroCellsToLegalize[i];
        // the returned vector is owned by this map; it is released in resetMacroCell2SitesInDistance()
        macro2SitesInDisplacementThreshold[curCell] = placementInfo->findNeiborSiteFromBinGrid(
            curCell, cellLoc[curCell->getCellId()].X, cellLoc[curCell->getCellId()].Y, displacementThreshold,
            candidateFactor * maxNumCandidate);
    }
}
/**
 * @brief release the cached candidate-site vectors of the cells left to be matched
 *
 * Frees the vectors previously allocated by findMacroCell2SitesInDistance().
 */
void resetMacroCell2SitesInDistance()
{
    for (auto curCell : macroCellsToLegalize)
    {
        delete macro2SitesInDisplacementThreshold[curCell];
    }
}
/**
 * @brief clear all matching records and restore the search parameters to their initial values
 *
 */
inline void resetSettings()
{
    // drop the previous matching state
    matchedMacroCells.clear();
    matchedSites.clear();
    cellLevelMatching.clear();
    PULevelMatching.clear();
    // restore the tunable search parameters
    displacementThreshold = initialDisplacementThreshold;
    maxNumCandidate = initialMaxNumCandidate;
}
/**
* @brief check how many sites are required by the given PlacementUnit
*
* @param tmpPUUnit the given PlacementUnit
* @return int
*/
int getMarcroCellNum(PlacementInfo::PlacementUnit *tmpMacroUnit);
// exchange the two PlacementUnit pointers in place
inline void swapPU(PlacementInfo::PlacementUnit **A, PlacementInfo::PlacementUnit **B)
{
    PlacementInfo::PlacementUnit *holder = *B;
    *B = *A;
    *A = holder;
}
void sortPUsByPU2Y(std::deque<PlacementInfo::PlacementUnit *> &PUs);
void sortSitesBySiteY(std::vector<DeviceInfo::DeviceSite *> &sites);
inline float getDisplacement(PlacementInfo::Location ¯oLoc, DeviceInfo::DeviceSite *curSite)
{
return std::fabs(macroLoc.X - curSite->X()) + y2xRatio * std::fabs(macroLoc.Y - curSite->Y());
}
// y2xRatio-weighted Manhattan distance between a PlacementUnit's anchor and a candidate site
inline float getDisplacement(PlacementInfo::PlacementUnit *curPU, DeviceInfo::DeviceSite *curSite)
{
    float dx = std::fabs(curPU->X() - curSite->X());
    float dy = std::fabs(curPU->Y() - curSite->Y());
    return dx + y2xRatio * dy;
}
/**
 * @brief get the HPWL change when the given DesignCell moves to the given DeviceSite
 *
 * For an unpacked cell, the PlacementUnit anchor is the site itself; for a macro, the anchor
 * is shifted by the cell's offset inside the macro and the change is averaged over the number
 * of cells in the macro.
 *
 * @param curCell the given DesignCell
 * @param curSite the given DeviceSite
 * @return float
 */
inline float getHPWLChange(DesignInfo::DesignCell *curCell, DeviceInfo::DeviceSite *curSite)
{
    auto tmpPU = placementInfo->getPlacementUnitByCell(curCell);
    auto nets = placementInfo->getPlacementUnitId2Nets()[tmpPU->getId()];
    float resX = 0.0, resY = 0.0;
    float cellCnt = 1.0;
    if (dynamic_cast<PlacementInfo::PlacementUnpackedCell *>(tmpPU))
    {
        resX = curSite->X();
        resY = curSite->Y();
    }
    else if (auto tmpMacro = dynamic_cast<PlacementInfo::PlacementMacro *>(tmpPU))
    {
        resX = curSite->X() - tmpMacro->getCellOffsetXInMacro(curCell);
        resY = curSite->Y() - tmpMacro->getCellOffsetYInMacro(curCell);
        cellCnt = tmpMacro->getCells().size();
    }
    float before = 0.0;
    float after = 0.0;
    for (auto curNet : nets)
    {
        if (curNet->getDesignNet()->getPins().size() > 1000) // it could be clock
            continue;
        before += curNet->getHPWL(y2xRatio);
        after += curNet->getNewHPWLByTrying(tmpPU, resX, resY, y2xRatio);
    }
    return (after - before) / cellCnt;
}
/**
 * @brief get the HPWL change when the given PlacementUnit moves to the given DeviceSite
 *
 * @param tmpPU the given PlacementUnit
 * @param curSite the given DeviceSite
 * @return float
 */
inline float getHPWLChange(PlacementInfo::PlacementUnit *tmpPU, DeviceInfo::DeviceSite *curSite)
{
    // delegate to the location-based overload; the original body duplicated it verbatim
    return getHPWLChange(tmpPU, curSite->X(), curSite->Y());
}
/**
 * @brief get the HPWL change when the given PlacementUnit moves to the given location
 *
 * Nets with more than 1000 pins are skipped since they are likely clock nets.
 *
 * @param tmpPU the given PlacementUnit
 * @param PUX given location X
 * @param PUY given location Y
 * @return float
 */
inline float getHPWLChange(PlacementInfo::PlacementUnit *tmpPU, float PUX, float PUY)
{
    float before = 0.0;
    float after = 0.0;
    auto nets = placementInfo->getPlacementUnitId2Nets()[tmpPU->getId()];
    for (auto curNet : nets)
    {
        if (curNet->getDesignNet()->getPins().size() > 1000) // it could be clock
            continue;
        before += curNet->getHPWL(y2xRatio);
        after += curNet->getNewHPWLByTrying(tmpPU, PUX, PUY, y2xRatio);
    }
    return (after - before);
}
// exchange the two DeviceSite pointers in place
inline void swapSitePtr(DeviceInfo::DeviceSite **siteA, DeviceInfo::DeviceSite **siteB)
{
    DeviceInfo::DeviceSite *holder = *siteB;
    *siteB = *siteA;
    *siteA = holder;
}
inline int sortPartition(DesignInfo::DesignCell *curCell, std::vector<DeviceInfo::DeviceSite *> &sites, int low,
int high, PlacementInfo::Location ¯oLoc)
{
int pivot, index, i;
index = low;
pivot = high;
for (i = low; i < high; i++)
{
// finding index of pivot.
// if (a[i] < a[pivot])
// if (getDisplacement(macroLoc, sites[i]) < getDisplacement(macroLoc, sites[pivot]))
if (getHPWLChange(curCell, sites[i]) < getHPWLChange(curCell, sites[pivot]))
{
swapSitePtr(&sites[i], &sites[index]);
index++;
}
}
swapSitePtr(&sites[pivot], &sites[index]);
return index;
}
inline int RandomPivotPartition(DesignInfo::DesignCell *curCell, std::vector<DeviceInfo::DeviceSite *> &sites,
int low, int high, PlacementInfo::Location ¯oLoc)
{
// Random selection of pivot.
int pvt;
pvt = (high + low) / 2;
// pvt = low + n % (high - low + 1); // Randomizing the pivot value from sub-array.
swapSitePtr(&sites[high], &sites[pvt]);
return sortPartition(curCell, sites, low, high, macroLoc);
}
void quick_sort_WLChange(DesignInfo::DesignCell *curCell, std::vector<DeviceInfo::DeviceSite *> &sites, int p,
int q, PlacementInfo::Location ¯oLoc)
{
// recursively sort the list
int pindex;
if (p < q)
{
pindex = RandomPivotPartition(curCell, sites, p, q, macroLoc); // randomly choose pivot
// Recursively implementing QuickSort.
quick_sort_WLChange(curCell, sites, p, pindex - 1, macroLoc);
quick_sort_WLChange(curCell, sites, pindex + 1, q, macroLoc);
}
}
// exchange the two PlacementUnit pointers in place
inline void swapPUs(PlacementInfo::PlacementUnit **PUA, PlacementInfo::PlacementUnit **PUB)
{
    PlacementInfo::PlacementUnit *holder = *PUB;
    *PUB = *PUA;
    *PUA = holder;
}
/**
 * @brief quick-sort partition step ordering PlacementUnits by their X location
 *
 * The pivot element at PUs[high] never moves during the loop, so its X location
 * is read once instead of on every comparison (the original re-read it each
 * iteration).
 *
 * @param PUs the PlacementUnits being sorted
 * @param low the lower bound of the partition range
 * @param high the upper bound of the partition range (pivot position)
 * @return int the final index of the pivot
 */
inline int sortPartition(std::vector<PlacementInfo::PlacementUnit *> &PUs, int low, int high)
{
    int index = low;
    int pivot = high;
    float pivotX = PUs[pivot]->X();
    for (int i = low; i < high; i++)
    {
        // move elements left of the pivot to the front of the range
        if (PUs[i]->X() < pivotX)
        {
            swapPUs(&PUs[i], &PUs[index]);
            index++;
        }
    }
    swapPUs(&PUs[pivot], &PUs[index]);
    return index;
}
// randomly pick a pivot within [low, high], move it to the end, then partition
inline int RandomPivotPartition(std::vector<PlacementInfo::PlacementUnit *> &PUs, int low, int high)
{
    int n = random();
    int pvt = low + n % (high - low + 1); // randomize the pivot position inside the sub-array
    swapPUs(&PUs[high], &PUs[pvt]);
    return sortPartition(PUs, low, high);
}
/**
 * @brief recursively quick-sort PlacementUnits by their X location
 *
 * @param PUs the PlacementUnits to sort
 * @param p the lower bound of the sort range
 * @param q the upper bound of the sort range
 */
void quick_sort_locX(std::vector<PlacementInfo::PlacementUnit *> &PUs, int p, int q)
{
    if (p >= q)
        return;
    int pindex = RandomPivotPartition(PUs, p, q); // randomly choose pivot
    quick_sort_locX(PUs, p, pindex - 1);
    quick_sort_locX(PUs, pindex + 1, q);
}
};
#endif |
image-view.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% V V IIIII EEEEE W W %
% V V I E W W %
% V V I EEE W W W %
% V V I E WW WW %
% V IIIII EEEEE W W %
% %
% %
% MagickCore Image View Methods %
% %
% Software Design %
% Cristy %
% March 2003 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/MagickCore.h"
#include "magick/exception-private.h"
#include "magick/monitor-private.h"
#include "magick/thread-private.h"
/*
Typedef declarations.
*/
struct _ImageView
{
  char
    *description;     /* user-supplied label; passed to SetImageProgress() by the iterators */

  RectangleInfo
    extent;           /* region of the image this view spans */

  Image
    *image;           /* associated image (not destroyed by DestroyImageView) */

  CacheView
    *view;            /* pixel cache view used for all pixel access */

  size_t
    number_threads;   /* thread count hint for the view iterators */

  ExceptionInfo
    *exception;       /* exception sink for pixel-cache failures */

  MagickBooleanType
    debug;            /* debug flag copied by CloneImageView */

  size_t
    signature;        /* MagickCoreSignature while the view is valid */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageView() makes a copy of the specified image view.
%
% The format of the CloneImageView method is:
%
% ImageView *CloneImageView(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
/*
  CloneImageView() makes a deep copy of the specified image view.
  The caller owns the returned view and must release it with DestroyImageView().
*/
MagickExport ImageView *CloneImageView(const ImageView *image_view)
{
  ImageView
    *clone_view;

  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  clone_view=(ImageView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (ImageView *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(clone_view,0,sizeof(*clone_view));
  clone_view->description=ConstantString(image_view->description);
  clone_view->extent=image_view->extent;
  /*
    Bug fix: the image pointer was never copied, leaving clone_view->image
    NULL so GetImageViewImage() on a clone returned NULL.  The view does not
    own the image, so a shallow copy is correct.
  */
  clone_view->image=image_view->image;
  clone_view->view=CloneCacheView(image_view->view);
  clone_view->number_threads=image_view->number_threads;
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,image_view->exception);
  clone_view->debug=image_view->debug;
  clone_view->signature=MagickCoreSignature;
  return(clone_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageView() deallocates memory associated with a image view.
%
% The format of the DestroyImageView method is:
%
% ImageView *DestroyImageView(ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
/*
  DestroyImageView() releases everything the view owns (description string,
  cache view, exception info) and then the view structure itself; the
  associated image is NOT destroyed.  Always returns NULL.
*/
MagickExport ImageView *DestroyImageView(ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  image_view->view=DestroyCacheView(image_view->view);
  image_view->exception=DestroyExceptionInfo(image_view->exception);
  if (image_view->description != (char *) NULL)
    image_view->description=DestroyString(image_view->description);
  /* invalidate the signature so stale pointers trip the asserts above */
  image_view->signature=(~MagickCoreSignature);
  return((ImageView *) RelinquishMagickMemory(image_view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D u p l e x T r a n s f e r I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DuplexTransferImageViewIterator() iterates over three image views in
% parallel and calls your transfer method for each scanline of the view. The
% source and duplex pixel extent is not confined to the image canvas-- that is
% you can include negative offsets or widths or heights that exceed the image
% dimension. However, the destination image view is confined to the image
% canvas-- that is no negative offsets or widths or heights that exceed the
% image dimension are permitted.
%
% The callback signature is:
%
% MagickBooleanType DuplexTransferImageViewMethod(const ImageView *source,
% const ImageView *duplex,ImageView *destination,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the DuplexTransferImageViewIterator method is:
%
% MagickBooleanType DuplexTransferImageViewIterator(ImageView *source,
% ImageView *duplex,ImageView *destination,
% DuplexTransferImageViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o duplex: the duplex image view.
%
% o destination: the destination image view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
/*
  DuplexTransferImageViewIterator() walks the source, duplex, and destination
  views row by row (possibly in parallel) and invokes the user transfer
  callback for each row.  Returns MagickTrue only if every row succeeded.
*/
MagickExport MagickBooleanType DuplexTransferImageViewIterator(
  ImageView *source,ImageView *duplex,ImageView *destination,
  DuplexTransferImageViewMethod transfer,void *context)
{
  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  if (transfer == (DuplexTransferImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  destination_image=destination->image;
  /* the destination is written to, so it must not share pixels (DirectClass) */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    const PixelPacket
      *magick_restrict duplex_pixels,
      *magick_restrict pixels;

    PixelPacket
      *magick_restrict destination_pixels;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
      duplex->extent.width,1,duplex->exception);
    if (duplex_pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /*
          Bug fix: the failed sync happened on the destination's cache view,
          but the original code inherited the exception from source->view,
          reporting the wrong view's error; query destination->view instead.
        */
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w A u t h e n t i c I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewAuthenticIndexes() returns the image view authentic indexes.
%
% The format of the GetImageViewAuthenticIndexes method is:
%
% IndexPacket *GetImageViewAuthenticIndexes(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport IndexPacket *GetImageViewAuthenticIndexes(
  const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  /* thin forwarder to the underlying cache view's authentic index queue */
  return(GetCacheViewAuthenticIndexQueue(image_view->view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewAuthenticPixels() returns the image view authentic pixels.
%
% The format of the GetImageViewAuthenticPixels method is:
%
% PixelPacket *GetImageViewAuthenticPixels(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport PixelPacket *GetImageViewAuthenticPixels(
  const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  /* thin forwarder to the underlying cache view's authentic pixel queue */
  return(GetCacheViewAuthenticPixelQueue(image_view->view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewException() returns the severity, reason, and description of any
% error that occurs when utilizing a image view.
%
% The format of the GetImageViewException method is:
%
% char *GetImageViewException(const ImageView *image_view,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o image_view: the pixel image_view.
%
% o severity: the severity of the error is returned here.
%
*/
MagickExport char *GetImageViewException(const ImageView *image_view,
  ExceptionType *severity)
{
  char
    *description;

  assert(image_view != (const ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  assert(severity != (ExceptionType *) NULL);
  *severity=image_view->exception->severity;
  /*
    NOTE(review): the buffer is 2*MaxTextExtent bytes, yet each copy and
    concatenation below is bounded to MaxTextExtent total, so long messages
    may truncate before the buffer is full -- confirm this is intended.
  */
  description=(char *) AcquireQuantumMemory(MaxTextExtent,
    2*sizeof(*description));
  if (description == (char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  *description='\0';
  if (image_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(description,GetLocaleExceptionMessage(
      image_view->exception->severity,image_view->exception->reason),
      MaxTextExtent);
  if (image_view->exception->description != (char *) NULL)
    {
      /* append the detail text in parentheses after the reason */
      (void) ConcatenateMagickString(description," (",MaxTextExtent);
      (void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
        image_view->exception->severity,image_view->exception->description),
        MaxTextExtent);
      (void) ConcatenateMagickString(description,")",MaxTextExtent);
    }
  /* caller owns the returned string and must free it */
  return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewExtent() returns the image view extent.
%
% The format of the GetImageViewExtent method is:
%
% RectangleInfo GetImageViewExtent(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport RectangleInfo GetImageViewExtent(const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  /* returned by value; the caller gets a copy of the view's extent */
  return(image_view->extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewImage() returns the image associated with the image view.
%
% The format of the GetImageViewImage method is:
%
% Image *GetImageViewImage(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport Image *GetImageViewImage(const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  /* the view does not own the image; the caller must not destroy it */
  return(image_view->image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewIterator() iterates over the image view in parallel and calls
% your get method for each scanline of the view. The pixel extent is
% not confined to the image canvas-- that is you can include negative offsets
% or widths or heights that exceed the image dimension. Any updates to
% the pixels in your callback are ignored.
%
% The callback signature is:
%
% MagickBooleanType GetImageViewMethod(const ImageView *source,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback get method that must be
% executed by a single thread at a time.
%
% The format of the GetImageViewIterator method is:
%
% MagickBooleanType GetImageViewIterator(ImageView *source,
% GetImageViewMethod get,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o get: the get callback method.
%
% o context: the user defined context.
%
*/
MagickExport MagickBooleanType GetImageViewIterator(ImageView *source,
  GetImageViewMethod get,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  if (get == (GetImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    NOTE(review): extent.height is used as the exclusive end coordinate of the
    row loop (y runs from extent.y to extent.height-1), not as a row count --
    confirm this matches what callers store in the extent.
  */
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    const PixelPacket
      *pixels;

    if (status == MagickFalse)
      continue;
    /* rows are fetched read-only; pixel updates by the callback are ignored */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    if (get(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w V i r t u a l I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewVirtualIndexes() returns the image view virtual indexes.
%
% The format of the GetImageViewVirtualIndexes method is:
%
% const IndexPacket *GetImageViewVirtualIndexes(
% const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
/*
  GetImageViewVirtualIndexes() returns the virtual colormap indexes associated
  with the most recent pixel fetch on this view's cache view.
*/
MagickExport const IndexPacket *GetImageViewVirtualIndexes(
  const ImageView *image_view)
{
  const IndexPacket
    *indexes;

  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  indexes=GetCacheViewVirtualIndexQueue(image_view->view);
  return(indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewVirtualPixels() returns the image view virtual pixels.
%
% The format of the GetImageViewVirtualPixels method is:
%
% const PixelPacket *GetImageViewVirtualPixels(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
/*
  GetImageViewVirtualPixels() returns the virtual pixels associated with the
  most recent pixel fetch on this view's cache view.
*/
MagickExport const PixelPacket *GetImageViewVirtualPixels(
  const ImageView *image_view)
{
  const PixelPacket
    *pixels;

  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  pixels=GetCacheViewVirtualPixelQueue(image_view->view);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageView() returns MagickTrue if the parameter is verified as a image
% view object.
%
% The format of the IsImageView method is:
%
% MagickBooleanType IsImageView(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
/*
  IsImageView() verifies that the argument is a live ImageView object: a
  non-NULL pointer carrying the MagickCore signature.
*/
MagickExport MagickBooleanType IsImageView(const ImageView *image_view)
{
  if ((image_view != (const ImageView *) NULL) &&
      (image_view->signature == MagickCoreSignature))
    return(MagickTrue);
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewImageView() returns a image view required for all other methods in the
% Image View API.
%
% The format of the NewImageView method is:
%
% ImageView *NewImageView(MagickCore *wand)
%
% A description of each parameter follows:
%
% o wand: the wand.
%
*/
/*
  NewImageView() allocates an image view spanning the entire image canvas.
  The caller owns the returned view (destroy with DestroyImageView); the view
  does NOT take ownership of the image itself.
*/
MagickExport ImageView *NewImageView(Image *image)
{
  ImageView
    *image_view;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  image_view=(ImageView *) AcquireMagickMemory(sizeof(*image_view));
  if (image_view == (ImageView *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(image_view,0,sizeof(*image_view));
  image_view->description=ConstantString("ImageView");
  /* The image must be attached before the cache view is acquired from it. */
  image_view->image=image;
  image_view->exception=AcquireExceptionInfo();
  image_view->view=AcquireVirtualCacheView(image_view->image,
    image_view->exception);
  /* Default extent covers the whole canvas. */
  image_view->extent.width=image->columns;
  image_view->extent.height=image->rows;
  image_view->extent.x=0;
  image_view->extent.y=0;
  image_view->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  image_view->debug=IsEventLogging();
  image_view->signature=MagickCoreSignature;
  return(image_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w I m a g e V i e w R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewImageViewRegion() returns a image view required for all other methods
% in the Image View API.
%
% The format of the NewImageViewRegion method is:
%
% ImageView *NewImageViewRegion(MagickCore *wand,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x,y,columns,rows: These values define the perimeter of a extent of
% pixel_wands view.
%
*/
/*
  NewImageViewRegion() allocates an image view restricted to the given region
  of the image.  The caller owns the returned view (destroy with
  DestroyImageView).

  Fix: the image is now attached to the view *before* the cache view is
  acquired.  Previously AcquireVirtualCacheView() was called while
  image_view->image was still NULL (it was only assigned afterwards), so the
  cache view was created against a NULL image.
*/
MagickExport ImageView *NewImageViewRegion(Image *image,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  ImageView
    *image_view;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  image_view=(ImageView *) AcquireMagickMemory(sizeof(*image_view));
  if (image_view == (ImageView *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(image_view,0,sizeof(*image_view));
  image_view->description=ConstantString("ImageView");
  /* Attach the image first: the cache view below is acquired from it. */
  image_view->image=image;
  image_view->exception=AcquireExceptionInfo();
  image_view->view=AcquireVirtualCacheView(image_view->image,
    image_view->exception);
  image_view->extent.width=width;
  image_view->extent.height=height;
  image_view->extent.x=x;
  image_view->extent.y=y;
  image_view->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  image_view->debug=IsEventLogging();
  image_view->signature=MagickCoreSignature;
  return(image_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i e w D e s c r i p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageViewDescription() associates a description with an image view.
%
% The format of the SetImageViewDescription method is:
%
% void SetImageViewDescription(ImageView *image_view,
% const char *description)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
% o description: the image view description.
%
*/
/*
  SetImageViewDescription() associates a description with an image view.

  Fix: release the previous description (NewImageView installs one with
  ConstantString) before installing the new copy; the old string was leaked.
*/
MagickExport void SetImageViewDescription(ImageView *image_view,
  const char *description)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  if (image_view->description != (char *) NULL)
    image_view->description=DestroyString(image_view->description);
  image_view->description=ConstantString(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageViewIterator() iterates over the image view in parallel and calls
% your set method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension. The pixels are initiallly
% undefined and any settings you make in the callback method are automagically
% synced back to your image.
%
% The callback signature is:
%
% MagickBooleanType SetImageViewMethod(ImageView *destination,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback set method that must be
% executed by a single thread at a time.
%
% The format of the SetImageViewIterator method is:
%
% MagickBooleanType SetImageViewIterator(ImageView *destination,
% SetImageViewMethod set,void *context)
%
% A description of each parameter follows:
%
% o destination: the image view.
%
% o set: the set callback method.
%
% o context: the user defined context.
%
*/
/*
  SetImageViewIterator() walks every scanline of the view in parallel and
  invokes the user 'set' callback once per row.  Rows are fetched through the
  *authentic* pixel interface (so the extent must lie on the canvas), start
  undefined, and whatever the callback writes is synced back to the image.
  Returns MagickTrue only if every row succeeded.
*/
MagickExport MagickBooleanType SetImageViewIterator(ImageView *destination,
  SetImageViewMethod set,void *context)
{
  ExceptionInfo
    *exception;

  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(destination != (ImageView *) NULL);
  assert(destination->signature == MagickCoreSignature);
  if (set == (SetImageViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->image;
  /* Writing pixels requires DirectClass storage. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* Row count used to size the OpenMP thread team. */
  height=(size_t) (destination->extent.height-destination->extent.y);
#endif
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(destination_image,destination_image,height,1)
#endif
  for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    PixelPacket
      *magick_restrict pixels;

    /* Once any row fails, remaining iterations become no-ops. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
      y,destination->extent.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
        continue;
      }
    if (set(destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* Push the callback's writes back into the pixel cache. */
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Atomic protects only the counter increment. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(destination_image,destination->description,
          progress,destination->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i e w T h r e a d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageViewThreads() sets the number of threads in a thread team.
%
% The format of the SetImageViewDescription method is:
%
% void SetImageViewThreads(ImageView *image_view,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
% o number_threads: the number of threads in a thread team.
%
*/
/*
  SetImageViewThreads() sets the number of threads in the view's thread team,
  clamped to the global ThreadResource limit.
*/
MagickExport void SetImageViewThreads(ImageView *image_view,
  const size_t number_threads)
{
  size_t
    limit;

  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  limit=(size_t) GetMagickResourceLimit(ThreadResource);
  image_view->number_threads=number_threads > limit ? limit : number_threads;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f e r I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransferImageViewIterator() iterates over two image views in parallel and
% calls your transfer method for each scanline of the view. The source pixel
% extent is not confined to the image canvas-- that is you can include
% negative offsets or widths or heights that exceed the image dimension.
% However, the destination image view is confined to the image canvas-- that
% is no negative offsets or widths or heights that exceed the image dimension
% are permitted.
%
% The callback signature is:
%
% MagickBooleanType TransferImageViewMethod(const ImageView *source,
% ImageView *destination,const ssize_t y,const int thread_id,
% void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the TransferImageViewIterator method is:
%
% MagickBooleanType TransferImageViewIterator(ImageView *source,
% ImageView *destination,TransferImageViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o destination: the destination image view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
/*
  TransferImageViewIterator() walks two views in parallel, row by row, and
  invokes the user 'transfer' callback for each scanline.  Source rows are
  read through the virtual pixel interface (may extend off-canvas); the
  destination rows are authentic pixels confined to its canvas and are synced
  back after each callback.  Returns MagickTrue only if every row succeeded.

  Fixes: validate the destination view (previously only source was asserted,
  so a NULL destination crashed on the first dereference), and inherit the
  sync-failure exception from destination->view — the view whose sync actually
  failed — instead of source->view.
*/
MagickExport MagickBooleanType TransferImageViewIterator(ImageView *source,
  ImageView *destination,TransferImageViewMethod transfer,void *context)
{
  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(destination != (ImageView *) NULL);
  assert(destination->signature == MagickCoreSignature);
  if (transfer == (TransferImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  destination_image=destination->image;
  /* Writing destination pixels requires DirectClass storage. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    const PixelPacket
      *magick_restrict pixels;

    PixelPacket
      *magick_restrict destination_pixels;

    /* Once any row fails, remaining iterations become no-ops. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    if (transfer(source,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* Push the callback's writes back into the destination pixel cache. */
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U p d a t e I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UpdateImageViewIterator() iterates over the image view in parallel and calls
% your update method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension are permitted. Updates to pixels
% in your callback are automagically synced back to the image.
%
% The callback signature is:
%
% MagickBooleanType UpdateImageViewMethod(ImageView *source,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback update method that must be
% executed by a single thread at a time.
%
% The format of the UpdateImageViewIterator method is:
%
% MagickBooleanType UpdateImageViewIterator(ImageView *source,
% UpdateImageViewMethod update,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o update: the update callback method.
%
% o context: the user defined context.
%
*/
/*
  UpdateImageViewIterator() walks every scanline of the view in parallel and
  invokes the user 'update' callback once per row.  Rows are fetched through
  the authentic pixel interface (extent must lie on the canvas); the
  callback's modifications are synced back to the image.  Returns MagickTrue
  only if every row succeeded.
*/
MagickExport MagickBooleanType UpdateImageViewIterator(ImageView *source,
  UpdateImageViewMethod update,void *context)
{
  ExceptionInfo
    *exception;

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  if (update == (UpdateImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  /* Writing pixels requires DirectClass storage. */
  if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* Row count used to size the OpenMP thread team. */
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    PixelPacket
      *magick_restrict pixels;

    /* Once any row fails, remaining iterations become no-ops. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
      source->extent.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(source->exception,GetCacheViewException(source->view));
        status=MagickFalse;
        continue;
      }
    if (update(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* Push the callback's writes back into the pixel cache. */
    if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
      {
        InheritException(source->exception,GetCacheViewException(source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Atomic protects only the counter increment. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
|
mwac_utils.c | #include <stdlib.h>
#include <stdio.h>
#include <omp.h>
//#include <plot.h>
#include "mwac_utils.h"
#include "antenna_mapping.h"
/*
 * Build the correlator input mapping tables.
 *
 * First expands the per-PFB input permutation (single_pfb_mapping, 64 inputs
 * per PFB) into the full pfb_output_to_input table for all npfb=4 PFBs.
 * Then, for every (station, station, pol, pol) combination, records in
 * corr_mapping — indexed by the *PFB input* numbers — which logical
 * station/pol pair that correlator product corresponds to.
 *
 * All state lives in the extern globals declared below; no return value.
 */
void fill_mapping_matrix() {

    extern map_t corr_mapping[NINPUT][NINPUT];
    extern int pfb_output_to_input[NINPUT];
    extern int single_pfb_mapping[64];
    extern int npol;
    extern int nstation;

    int inp1 = 0, inp2 = 0;
    int pol1 = 0, pol2 = 0;
    int index1 = 0, index2 = 0;
    int p=0,npfb = 4;

    // Output matrix has ordering
    // [channel][station][station][polarization][polarization][complexity]

    /* Replicate the single-PFB permutation across all 4 PFBs: PFB p handles
       inputs [p*64, p*64+64). */
    for (p=0;p<npfb;p++) {
        for (inp1=0;inp1<64;inp1++) {
            pfb_output_to_input[(p*64) + inp1] = single_pfb_mapping[inp1] + (p*64);
        }
    }

    /* Invert the mapping: for each correlator product (indexed by PFB input
       numbers) record the logical station/pol pair it came from. */
    for (inp1 = 0; inp1 < nstation; inp1++) {
        for (inp2 = 0; inp2 < nstation; inp2++) {
            for (pol1 = 0; pol1 < npol; pol1++) {
                for (pol2 = 0; pol2 < npol; pol2++) {

                    /* Logical input numbers (station-major, pol-minor). */
                    index1 = inp1 * npol + pol1;
                    index2 = inp2 * npol + pol2;
                    /*
                     fprintf(stdout,
                     "inp1 %d pol1 %d inp2 %d pol2 %d map to index1 %d and index2 %d\n",
                     inp1, pol1, inp2, pol2, index1, index2);

                     fprintf(stdout,
                     "these map to PFB input numbers: %d and %d\n",
                     pfb_output_to_input[index1],
                     pfb_output_to_input[index2]);
                     */
                    corr_mapping[pfb_output_to_input[index1]][pfb_output_to_input[index2]].stn1 =
                            inp1; // this should give us the pfb input
                    corr_mapping[pfb_output_to_input[index1]][pfb_output_to_input[index2]].stn2 =
                            inp2;
                    corr_mapping[pfb_output_to_input[index1]][pfb_output_to_input[index2]].pol1 =
                            pol1;
                    corr_mapping[pfb_output_to_input[index1]][pfb_output_to_input[index2]].pol2 =
                            pol2;

                }
            }
        }
    }
}
/*
 * Extract one baseline (st1, st2, pol1, pol2) from the full correlation cube
 * by linearly scanning it, copying the matching sample for every frequency
 * channel into 'baseline'.  Cube layout (slowest to fastest):
 * [frequency][station][station][pol][pol].  Dimensions come from the extern
 * globals.  See get_baseline_lu() for the equivalent direct-lookup version.
 */
void get_baseline(int st1, int st2, int pol1, int pol2, float complex *data,
        float complex *baseline) {

    extern int npol;
    extern int nstation;
    extern int nfrequency;

    float complex *src = data;
    float complex *dst = baseline;
    int freq, s1, s2, p1, p2;

    for (freq = 0; freq < nfrequency; freq++) {
        for (s1 = 0; s1 < nstation; s1++) {
            for (s2 = 0; s2 < nstation; s2++) {
                for (p1 = 0; p1 < npol; p1++) {
                    for (p2 = 0; p2 < npol; p2++) {
                        /* Exactly one cell per frequency matches. */
                        if (s1 == st1 && s2 == st2 && p1 == pol1 && p2 == pol2) {
                            *dst = *src;
                            dst++;
                        }
                        src++;
                    }
                }
            }
        }
    }
}
/*
 * Extract one baseline (st1, st2, pol1, pol2) from the full correlation cube
 * using direct index arithmetic instead of a full scan: the offset of the
 * wanted sample inside one frequency slab is fixed, and successive channels
 * are one slab stride apart.  Dimensions come from the extern globals.
 */
void get_baseline_lu(int st1, int st2, int pol1, int pol2, float complex *data,
        float complex *baseline) {

    extern int npol;
    extern int nstation;
    extern int nfrequency;

    /* Offset of (st1, st2, pol1, pol2) within a single frequency slab. */
    off_t base = npol*((st1*nstation*npol) + (st2*npol) + pol1) + pol2;
    /* Samples per frequency slab. */
    off_t stride = (off_t) nstation * nstation * npol * npol;
    int f;

    for (f = 0; f < nfrequency; f++)
        baseline[f] = data[(off_t) f * stride + base];
}
/*
 * Copy one baseline out of the full correlation cube into a packed
 * upper-triangular ("reordered") layout, for every frequency channel.
 *
 * (st1, st2, pol1, pol2) locate the sample in the input cube; (true_st1,
 * true_st2) locate the destination baseline in the triangular output.  When
 * 'conjugate' is non-zero the sample is conjugated, but only if st2 > st1
 * (otherwise the output element is left untouched — NOTE(review): looks
 * intentional for the Hermitian half, confirm against full_reorder()).
 */
void get_baseline_r(int st1, int st2, int pol1, int pol2, float complex *data,
        float complex *reorder,int npol, int nstation, int nfrequency,int true_st1,int true_st2,
        int true_pol1,int true_pol2,int conjugate) {

    /* Per-frequency strides of the input cube and triangular output. */
    size_t cube_stride = (size_t) nstation * nstation * npol * npol;
    size_t tri_stride = (size_t) nstation * (nstation + 1) * npol * npol / 2;
    /* Fixed offsets within one frequency slab. */
    size_t in_base = (size_t) ((st1*nstation*npol*npol) + (st2*npol*npol) + (pol1*npol) + pol2);
    size_t out_base = (size_t) ((((true_st1*nstation) - ((true_st1+1)/2)*true_st1) + true_st2)*npol*npol + (pol1*npol) + pol2);
    int f;

    for (f = 0; f < nfrequency; f++) {
        size_t src = (size_t) f * cube_stride + in_base;
        size_t dst = (size_t) f * tri_stride + out_base;
        if (!conjugate) {
            reorder[dst] = data[src];
        } else if (st2 > st1) {
            reorder[dst] = conj(data[src]);
        }
    }
}
// full reorder using the correct mapping - takes the input cube and produces a packed triangular output
// in the correct order
// wacky packed tile order to packed triangular
void full_reorder(float complex *full_matrix_h, float complex *reordered)
{
extern int npol;
extern int nstation;
extern int nfrequency;
extern map_t corr_mapping[NINPUT][NINPUT];
int t1=0;
int t2=0;
int p1=0;
int p2=0;
long long baseline_count = 0;
for (t1 = 0; t1 < nstation; t1++) {
for (t2 = t1; t2 < nstation; t2++) {
for (p1 = 0;p1 < npol;p1++) {
for (p2 =0; p2 < npol; p2++) {
baseline_count++;
int index1 = t1 * npol + p1;
int index2 = t2 * npol + p2;
/*
fprintf(stdout, "requesting ant1 %d ant 2 %d pol1 %d pol2 %d",
antenna1, antenna2, pol1, pol2);
*/
map_t the_mapping = corr_mapping[index1][index2];
int conjugate = 0;
/*
fprintf(stdout,
"input ant/pol combination decodes to stn1 %d stn2 %d pol1 %d pol2 %d\n",
the_mapping.stn1, the_mapping.stn2, the_mapping.pol1,
the_mapping.pol2);
*/
if (the_mapping.stn2 > the_mapping.stn1) {
conjugate = 1;
}
else {
conjugate = 0;
}
get_baseline_r(the_mapping.stn1, the_mapping.stn2, the_mapping.pol1,
the_mapping.pol2, full_matrix_h, reordered,npol,nstation,nfrequency,conjugate,t1,t2,p1,p2);
}
}
}
}
// now reoredered should contain a triagular packed array in the correct order
}
// Extracts the full matrix from the packed Hermitian form
void extractMatrix(float complex *matrix, float complex *packed) {
int f;
extern int npol;
extern int nstation;
extern int nfrequency;
/* use openmp to parallelise this. In single threaded version, this task takes 1/3 the overall CPU time,
so 4 threads should be plenty to make this negligible
*/
omp_set_num_threads(4);
#pragma omp parallel private (f)
{
#pragma omp for
for (f = 0; f < nfrequency; f++) {
int i,j,pol1,pol2;
for (i = 0; i < nstation; i++) {
for (j = 0; j <= i; j++) {
int k = f * (nstation + 1) * (nstation / 2) + i * (i + 1) / 2 + j;
for (pol1 = 0; pol1 < npol; pol1++) {
for (pol2 = 0; pol2 < npol; pol2++) {
int index = (k * npol + pol1) * npol + pol2;
matrix[(((f * nstation + i) * nstation + j) * npol + pol1) * npol + pol2] = packed[index];
matrix[(((f * nstation + j) * nstation + i) * npol + pol2) * npol + pol1] = conjf(packed[index]);
// printf("f:%d s1:%d s2:%d %d p1:%d p2:%d %d\n",f,i,j,k,pol1,pol2,index);
}
}
}
}
}
} // end openmp
}
/*
 * Single-threaded reference version of extractMatrix(): expand the packed
 * Hermitian triangle into the full cube by reading the packed data
 * sequentially.  NOTE(review): unlike extractMatrix(), the conjugate entry
 * here does NOT swap pol1/pol2 — preserved as-is, confirm which is intended.
 */
void extractMatrix_slow(float complex *matrix, float complex *packed) {

    extern int npol;
    extern int nstation;
    extern int nfrequency;

    int f, i, j, p1, p2;
    size_t src = 0;  /* sequential read cursor into 'packed' */

    for (f = 0; f < nfrequency; f++) {
        size_t fbase = (size_t) f * nstation * nstation * npol * npol;
        for (i = 0; i < nstation; i++) {
            for (j = 0; j <= i; j++) {
                for (p1 = 0; p1 < npol; p1++) {
                    for (p2 = 0; p2 < npol; p2++) {
                        size_t lower = fbase + ((size_t) i * nstation + j) * npol * npol + (size_t) p1 * npol + p2;
                        size_t upper = fbase + ((size_t) j * nstation + i) * npol * npol + (size_t) p1 * npol + p2;
                        matrix[lower] = packed[src];
                        matrix[upper] = conjf(packed[src]);
                        src++;
                    }
                }
            }
        }
    }
}
|
WinogradConv2D.c | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#define CL_USE_DEPRECATED_OPENCL_1_2_APIS
#include <CL/cl.h>
#include "../polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 1.05
#define MAX_SOURCE_SIZE (0x100000)
/* Problem size */
#define N 1024
// #define NI 8192
// #define NJ 8192
/* Thread block dimensions */
#define DIM_LOCAL_WORK_GROUP_X 32
#define DIM_LOCAL_WORK_GROUP_Y 8
#if defined(cl_khr_fp64) // Khronos extension available?
#pragma OPENCL EXTENSION cl_khr_fp64 : enable
#elif defined(cl_amd_fp64) // AMD extension available?
#pragma OPENCL EXTENSION cl_amd_fp64 : enable
#endif
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
char str_temp[1024];
cl_platform_id platform_id;
cl_device_id device_id;
cl_uint num_devices;
cl_uint num_platforms;
cl_int errcode;
cl_context clGPUContext;
cl_kernel clKernel;
cl_command_queue clCommandQue;
cl_program clProgram;
cl_mem a_mem_obj;
cl_mem b_mem_obj;
cl_mem c_mem_obj;
FILE* fp;
char* source_str;
size_t source_size;
double total_time = 0;
int cpu_offset;
void WinogradConv2D_2x2_omp(DATA_TYPE* input, DATA_TYPE* output, DATA_TYPE* transformed_filter, size_t* cpu_global_size);
/*
 * Load the OpenCL kernel source "WinogradConv2D_2x2.cl" into the global
 * source_str / source_size.  Exits the process on failure.
 *
 * Fixes: error messages go to stderr (they went to stdout), and the malloc
 * result is checked before fread writes through it.
 */
void read_cl_file() {
    // Load the kernel source code into the array source_str
    fp = fopen("WinogradConv2D_2x2.cl", "r");
    if (!fp) {
        fprintf(stderr, "Failed to load kernel.\n");
        exit(1);
    }
    source_str = (char*)malloc(MAX_SOURCE_SIZE);
    if (!source_str) {
        fprintf(stderr, "Failed to allocate kernel source buffer.\n");
        fclose(fp);
        exit(1);
    }
    source_size = fread(source_str, 1, MAX_SOURCE_SIZE, fp);
    fclose(fp);
}
/* Fill the N x N matrix A (row-major) with uniform random values in [0, 1]. */
void init(DATA_TYPE* A) {
    for (int r = 0; r < N; ++r) {
        for (int c = 0; c < N; ++c) {
            A[r * N + c] = (float)rand() / RAND_MAX;
        }
    }
}
/*
 * Select the first OpenCL platform and its first GPU device, then create the
 * global context and command queue.  Errors are reported to stdout but not
 * fatal; later calls will fail with their own diagnostics.
 */
void cl_initialization() {

    // Get platform and device information
    errcode = clGetPlatformIDs(1, &platform_id, &num_platforms);
    if (errcode != CL_SUCCESS)
        printf("Error getting platform IDs\n");

    errcode = clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_GPU, 1, &device_id, &num_devices);
    if (errcode != CL_SUCCESS)
        printf("Error getting device IDs\n");

    // Create an OpenCL context
    clGPUContext = clCreateContext(NULL, 1, &device_id, NULL, NULL, &errcode);
    if (errcode != CL_SUCCESS)
        printf("Error in creating context\n");

    // Create a command-queue (in-order, no profiling)
    clCommandQue = clCreateCommandQueue(clGPUContext, device_id, 0, &errcode);
    if (errcode != CL_SUCCESS)
        printf("Error in creating command queue\n");
}
/*
 * Create the three device buffers and upload the host data:
 *   a_mem_obj — N x N input image (read-only), uploaded from A;
 *   b_mem_obj — (N-2) x (N-2) convolution output (read-write);
 *   c_mem_obj — 4 x 4 Winograd-transformed filter (read-only), uploaded from C.
 * Note only the last clCreateBuffer errcode is checked before the writes.
 */
void cl_mem_init(DATA_TYPE* A, DATA_TYPE* C) {
    a_mem_obj = clCreateBuffer(clGPUContext, CL_MEM_READ_ONLY, sizeof(DATA_TYPE) * N * N, NULL, &errcode);
    b_mem_obj = clCreateBuffer(clGPUContext, CL_MEM_READ_WRITE, sizeof(DATA_TYPE) * (N - 2) * (N - 2), NULL, &errcode);
    // transformed filter
    c_mem_obj = clCreateBuffer(clGPUContext, CL_MEM_READ_ONLY, sizeof(DATA_TYPE) * 4 * 4, NULL, &errcode);

    if (errcode != CL_SUCCESS)
        printf("Error in creating buffers\n");

    /* Blocking writes: host arrays may be reused as soon as these return. */
    errcode = clEnqueueWriteBuffer(clCommandQue, a_mem_obj, CL_TRUE, 0, sizeof(DATA_TYPE) * N * N, A, 0, NULL, NULL);
    if (errcode != CL_SUCCESS)
        printf("Error in writing buffers\n");

    // transformed filter
    errcode = clEnqueueWriteBuffer(clCommandQue, c_mem_obj, CL_TRUE, 0, sizeof(DATA_TYPE) * 4 * 4, C, 0, NULL, NULL);
    if (errcode != CL_SUCCESS)
        printf("Error in writing buffers\n");
}
void cl_load_prog() {
// Create a program from the kernel source
clProgram = clCreateProgramWithSource(clGPUContext, 1, (const char**)&source_str, (const size_t*)&source_size, &errcode);
if (errcode != CL_SUCCESS)
printf("Error in creating program\n");
// Build the program
errcode = clBuildProgram(clProgram, 1, &device_id, NULL, NULL, NULL);
if (errcode != CL_SUCCESS)
printf("Error in building program\n");
// Create the OpenCL kernel
clKernel = clCreateKernel(clProgram, "WinogradConv2D_2x2_kernel", &errcode);
if (errcode != CL_SUCCESS)
printf("Error in creating kernel\n");
clFinish(clCommandQue);
}
/*
 * Launch the Winograd convolution cooperatively: the first cpu_offset percent
 * of the work-groups along dimension 0 run on the host via
 * WinogradConv2D_2x2_omp(), the remainder on the GPU.  The CPU result tiles
 * are written back into b_mem_obj, and the elapsed time is accumulated into
 * total_time (ms).
 *
 * Fixes: the `bool` flags required <stdbool.h>, which this file never
 * includes — replaced with int; the GPU kernel event is now released after
 * the wait (it was leaked on every call).
 */
void cl_launch_kernel() {
    double t_start, t_end;

    int in_map_size = N;
    int out_map_size = N - 2;
    int tile_n = (out_map_size + 1) / 2;  /* 2x2 output tiles per dimension */

    size_t localWorkSize[2], globalWorkSize[2];
    localWorkSize[0] = DIM_LOCAL_WORK_GROUP_X;
    localWorkSize[1] = DIM_LOCAL_WORK_GROUP_Y;
    globalWorkSize[0] = (size_t)ceil(((float)tile_n) / ((float)DIM_LOCAL_WORK_GROUP_X)) * DIM_LOCAL_WORK_GROUP_X;
    globalWorkSize[1] = (size_t)ceil(((float)tile_n) / ((float)DIM_LOCAL_WORK_GROUP_Y)) * DIM_LOCAL_WORK_GROUP_Y;

    /* Split dimension 0: cpu_offset percent of the work-groups to the host. */
    size_t cpu_global_size[2];
    cpu_global_size[0] = cpu_offset * (size_t)ceil(((float)tile_n) / ((float)DIM_LOCAL_WORK_GROUP_X)) / 100 * DIM_LOCAL_WORK_GROUP_X; // here
    cpu_global_size[1] = globalWorkSize[1];
    size_t gpu_global_size[2];
    gpu_global_size[0] = globalWorkSize[0] - cpu_global_size[0];
    gpu_global_size[1] = globalWorkSize[1];
    size_t global_offset[2];
    global_offset[0] = cpu_global_size[0];
    // global_offset[1] = 1;
    global_offset[1] = 0;

    int cpu_run = 0, gpu_run = 0;
    if (cpu_global_size[0] > 0) {
        cpu_run = 1;
    }
    if (gpu_global_size[0] > 0) {
        gpu_run = 1;
    }

    t_start = rtclock();
    DATA_TYPE* b_mem_cpu;
    DATA_TYPE* a_mem_cpu;
    DATA_TYPE* c_mem_cpu;

    cl_event kernelEvent1;
    if (gpu_run) {
        // Set the arguments of the kernel
        errcode = clSetKernelArg(clKernel, 0, sizeof(cl_mem), (void*)&a_mem_obj);
        errcode |= clSetKernelArg(clKernel, 1, sizeof(cl_mem), (void*)&b_mem_obj);
        errcode |= clSetKernelArg(clKernel, 2, sizeof(cl_mem), (void*)&c_mem_obj);
        errcode |= clSetKernelArg(clKernel, 3, sizeof(int), &in_map_size);
        errcode |= clSetKernelArg(clKernel, 4, sizeof(int), &out_map_size);
        if (errcode != CL_SUCCESS)
            printf("Error in seting arguments\n");
        errcode = clEnqueueNDRangeKernel(clCommandQue, clKernel, 2, global_offset, gpu_global_size, localWorkSize, 0, NULL, &kernelEvent1);
        /* NOTE(review): t_start is reset here, excluding the argument setup
           and enqueue overhead from the timed region — preserved as-is. */
        t_start = rtclock();
        if (errcode != CL_SUCCESS)
            printf("Error in launching kernel\n");
    }

    if (cpu_run) {
        /* Host copies of the buffers for the CPU share of the work. */
        b_mem_cpu = (DATA_TYPE*)malloc(sizeof(DATA_TYPE) * (N - 2) * (N - 2));
        c_mem_cpu = (DATA_TYPE*)malloc(sizeof(DATA_TYPE) * 4 * 4);
        a_mem_cpu = (DATA_TYPE*)malloc(N * N * sizeof(DATA_TYPE));
        errcode = clEnqueueReadBuffer(clCommandQue, a_mem_obj, CL_TRUE, 0,
                                      sizeof(DATA_TYPE) * N * N, a_mem_cpu, 0, NULL, NULL);
        errcode |= clEnqueueReadBuffer(clCommandQue, c_mem_obj, CL_TRUE, 0,
                                       sizeof(DATA_TYPE) * 4 * 4, c_mem_cpu, 0, NULL, NULL);
        if (errcode != CL_SUCCESS)
            printf("Error in read buffer\n");
        // printf("CPU size: %d\n", cpu_global_size[0]);
        WinogradConv2D_2x2_omp(a_mem_cpu, b_mem_cpu, c_mem_cpu, cpu_global_size);
        // errcode = clEnqueueWriteBuffer(clCommandQue, b_mem_obj, CL_TRUE, global_offset[0],
        //                                sizeof(DATA_TYPE) * (N-2) * (N-2), b_mem_cpu, 0, NULL, NULL);
        /* Write back only the rows the CPU produced when the GPU handles the
           rest; otherwise upload the whole output. */
        if (gpu_run) {
            errcode = clEnqueueWriteBuffer(clCommandQue, b_mem_obj, CL_TRUE, 0,
                                           sizeof(DATA_TYPE) * global_offset[0] * 2 * (N - 2), b_mem_cpu, 0, NULL, NULL);
        } else {
            errcode = clEnqueueWriteBuffer(clCommandQue, b_mem_obj, CL_TRUE, 0,
                                           sizeof(DATA_TYPE) * (N - 2) * (N - 2), b_mem_cpu, 0, NULL, NULL);
        }
        if (errcode != CL_SUCCESS)
            printf("Error in write buffer\n");
    }
    if (gpu_run) {
        cl_int err = clWaitForEvents(1, &kernelEvent1);
        if (err != CL_SUCCESS)
            printf("ERROR in corun\n");
        clReleaseEvent(kernelEvent1);  // was leaked previously
    }
    if (cpu_run) {
        free(b_mem_cpu);
        free(c_mem_cpu); // here
        free(a_mem_cpu); // here
    }
    t_end = rtclock();
    total_time += 1000.0 * (t_end - t_start);
    // fprintf(stdout, "Total time: %lf ms\n", 1000.0 * (t_end - t_start));
}
void cl_clean_up() {
// Clean up
errcode = clFlush(clCommandQue);
errcode = clFinish(clCommandQue);
errcode = clReleaseKernel(clKernel);
errcode = clReleaseProgram(clProgram);
errcode = clReleaseMemObject(a_mem_obj);
errcode = clReleaseMemObject(b_mem_obj);
errcode = clReleaseMemObject(c_mem_obj);
errcode = clReleaseCommandQueue(clCommandQue);
errcode = clReleaseContext(clGPUContext);
if (errcode != CL_SUCCESS)
printf("Error in cleanup\n");
}
// F(2x2,3x3)
void WinogradConv2D_2x2_filter_transformation(DATA_TYPE* transformed_filter) {
  // Builds the 4x4 Winograd-transformed filter U = G * g * G^T for a fixed
  // 3x3 kernel g, written row-major into transformed_filter[16].
  // G = { {1, 0, 0}, {.5, .5, .5}, {.5, -.5, .5}, {0, 0, 1} }
  DATA_TYPE g[3][3];
  g[0][0] = +0.2;
  g[1][0] = +0.5;
  g[2][0] = -0.8;
  g[0][1] = -0.3;
  g[1][1] = +0.6;
  g[2][1] = -0.9;
  g[0][2] = +0.4;
  g[1][2] = +0.7;
  g[2][2] = +0.10;

  // First product: Gg = G * g (4x3 intermediate).
  DATA_TYPE Gg[4][3];
  for (int col = 0; col < 3; col++) {
    Gg[0][col] = g[0][col];
    Gg[1][col] = 0.5f * g[0][col] + 0.5f * g[1][col] + 0.5f * g[2][col];
    Gg[2][col] = 0.5f * g[0][col] - 0.5f * g[1][col] + 0.5f * g[2][col];
    Gg[3][col] = g[2][col];
  }
  // Second product: U = Gg * G^T (4x4 result).
  for (int row = 0; row < 4; row++) {
    transformed_filter[row * 4 + 0] = Gg[row][0];
    transformed_filter[row * 4 + 1] = 0.5f * Gg[row][0] + 0.5f * Gg[row][1] + 0.5f * Gg[row][2];
    transformed_filter[row * 4 + 2] = 0.5f * Gg[row][0] - 0.5f * Gg[row][1] + 0.5f * Gg[row][2];
    transformed_filter[row * 4 + 3] = Gg[row][2];
  }
}
// CPU (OpenMP) portion of the Winograd F(2x2,3x3) convolution.
// input:  N x N image; output: (N-2) x (N-2) result;
// transformed_filter: 4x4 filter produced by the filter transformation above.
// cpu_global_size[0] bounds the tile rows handled on the CPU — presumably the
// CPU's share of the CPU/GPU split set up by the caller; TODO confirm.
void WinogradConv2D_2x2_omp(DATA_TYPE* input, DATA_TYPE* output, DATA_TYPE* transformed_filter, size_t* cpu_global_size) {
  // DATA_TYPE trasformed_filter[4][4];
  // WinogradConv2D_2x2_filter_transformation(trasformed_filter);
  int out_map_size = N - 2;
  int tile_n = (out_map_size + 1) / 2;  // number of 2x2 output tiles per dimension
  // for (int tile_i = 0; tile_i < tile_n; tile_i ++) {
  // for (int tile_j = 0; tile_j < cpu_global_size[0]; tile_j ++) {
  // Each thread runs the full tile_i loop; the inner tile_j loop is
  // workshared across the team by the `omp for`.
  #pragma omp parallel
  for (int tile_i = 0; tile_i < cpu_global_size[0]; tile_i++) {
    #pragma omp for
    for (int tile_j = 0; tile_j < tile_n; tile_j++) {
      // Input transformation: gather the 4x4 input tile (zero-padded at the
      // right/bottom image borders).
      DATA_TYPE input_tile[4][4], tmp_tile[4][4], transformed_tile[4][4];
      for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++) {
          int x = 2 * tile_i + i;
          int y = 2 * tile_j + j;
          if (x >= N || y >= N) {
            input_tile[i][j] = 0;
            continue;
          }
          input_tile[i][j] = input[x * N + y];
        }
      }
      // const float Bt[4][4] = {
      // {1.0f, 0.0f, -1.0f, 0.0f},
      // {0.0f, 1.0f, 1.0f, 0.0f},
      // {0.0f, -1.0f, 1.0f, 0.0f},
      // {0.0f, 1.0f, 0.0f, -1.0f}
      // }
      // Bt * d
      // #pragma omp simd
      for (int j = 0; j < 4; j++) {
        tmp_tile[0][j] = input_tile[0][j] - input_tile[2][j];
        tmp_tile[1][j] = input_tile[1][j] + input_tile[2][j];
        tmp_tile[2][j] = -input_tile[1][j] + input_tile[2][j];
        tmp_tile[3][j] = input_tile[1][j] - input_tile[3][j];
      }
      // d * B
      // #pragma omp simd
      for (int i = 0; i < 4; i++) {
        transformed_tile[i][0] = tmp_tile[i][0] - tmp_tile[i][2];
        transformed_tile[i][1] = tmp_tile[i][1] + tmp_tile[i][2];
        transformed_tile[i][2] = -tmp_tile[i][1] + tmp_tile[i][2];
        transformed_tile[i][3] = tmp_tile[i][1] - tmp_tile[i][3];
      }
      // Element-wise (Hadamard) multiplication with the transformed filter.
      DATA_TYPE multiplied_tile[4][4];
      for (int i = 0; i < 4; i++) {
        // #pragma omp simd
        for (int j = 0; j < 4; j++) {
          multiplied_tile[i][j] = transformed_tile[i][j] * transformed_filter[i * 4 + j];
        }
      }
      // Output transformation: project the 4x4 product down to the 2x2 tile.
      DATA_TYPE tmp_tile_1[2][4], final_tile[2][2];
      // const float At[2][4] {
      // {1.0f, 1.0f, 1.0f, 0.0f},
      // {0.0f, 1.0f, -1.0f, -1.0f}
      // }
      // At * I
      // #pragma omp simd
      for (int j = 0; j < 4; j++) {
        tmp_tile_1[0][j] = multiplied_tile[0][j] + multiplied_tile[1][j] + multiplied_tile[2][j];
        tmp_tile_1[1][j] = multiplied_tile[1][j] - multiplied_tile[2][j] - multiplied_tile[3][j];
      }
      // I * A
      // #pragma omp simd
      for (int i = 0; i < 2; i++) {
        final_tile[i][0] = tmp_tile_1[i][0] + tmp_tile_1[i][1] + tmp_tile_1[i][2];
        final_tile[i][1] = tmp_tile_1[i][1] - tmp_tile_1[i][2] - tmp_tile_1[i][3];
      }
      // Scatter the 2x2 tile into the output, skipping out-of-range cells
      // when out_map_size is odd.
      for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 2; j++) {
          int x = 2 * tile_i + i;
          int y = 2 * tile_j + j;
          if (x >= out_map_size || y >= out_map_size) {
            continue;
          }
          output[x * out_map_size + y] = final_tile[i][j];
        }
      }
    } // for tile_j
  } // for tile_i
}
void compareResults(DATA_TYPE* B, DATA_TYPE* B_outputFromGpu) {
  // Count the output cells whose CPU/GPU relative difference exceeds the
  // threshold, then report the total mismatch count.
  int fail = 0;
  int edge = N - 2;
  for (int row = 0; row < edge; row++) {
    for (int col = 0; col < edge; col++) {
      DATA_TYPE cpu_val = B[row * edge + col];
      DATA_TYPE gpu_val = B_outputFromGpu[row * edge + col];
      if (percentDiff(cpu_val, gpu_val) > PERCENT_DIFF_ERROR_THRESHOLD)
        fail++;
    }
  }
  printf("Error Threshold of %4.2f Percent: %d\n\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
// Sequential reference implementation of Winograd F(2x2,3x3) convolution.
// Used by main() to validate the GPU/CPU co-run result in compareResults().
// input: N x N image; output: (N-2) x (N-2); transformed_filter: 4x4.
void WinogradConv2D_2x2(DATA_TYPE* input, DATA_TYPE* output, DATA_TYPE* transformed_filter) {
  // DATA_TYPE trasformed_filter[4][4];
  // WinogradConv2D_2x2_filter_transformation(trasformed_filter);
  int out_map_size = N - 2;
  int tile_n = (out_map_size + 1) / 2;  // 2x2 output tiles per dimension
  for (int tile_i = 0; tile_i < tile_n; tile_i++) {
    for (int tile_j = 0; tile_j < tile_n; tile_j++) {
      // Input transformation: gather a 4x4 input tile, zero-padded at the
      // right/bottom image borders.
      DATA_TYPE input_tile[4][4], tmp_tile[4][4], transformed_tile[4][4];
      for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++) {
          int x = 2 * tile_i + i;
          int y = 2 * tile_j + j;
          if (x >= N || y >= N) {
            input_tile[i][j] = 0;
            continue;
          }
          input_tile[i][j] = input[x * N + y];
        }
      }
      // const float Bt[4][4] = {
      // {1.0f, 0.0f, -1.0f, 0.0f},
      // {0.0f, 1.0f, 1.0f, 0.0f},
      // {0.0f, -1.0f, 1.0f, 0.0f},
      // {0.0f, 1.0f, 0.0f, -1.0f}
      // }
      // Bt * d
      for (int j = 0; j < 4; j++) {
        tmp_tile[0][j] = input_tile[0][j] - input_tile[2][j];
        tmp_tile[1][j] = input_tile[1][j] + input_tile[2][j];
        tmp_tile[2][j] = -input_tile[1][j] + input_tile[2][j];
        tmp_tile[3][j] = input_tile[1][j] - input_tile[3][j];
      }
      // d * B
      for (int i = 0; i < 4; i++) {
        transformed_tile[i][0] = tmp_tile[i][0] - tmp_tile[i][2];
        transformed_tile[i][1] = tmp_tile[i][1] + tmp_tile[i][2];
        transformed_tile[i][2] = -tmp_tile[i][1] + tmp_tile[i][2];
        transformed_tile[i][3] = tmp_tile[i][1] - tmp_tile[i][3];
      }
      // Element-wise (Hadamard) multiplication with the transformed filter.
      DATA_TYPE multiplied_tile[4][4];
      for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++) {
          multiplied_tile[i][j] = transformed_tile[i][j] * transformed_filter[i * 4 + j];
        }
      }
      // Output transformation: project the 4x4 product down to a 2x2 tile.
      DATA_TYPE tmp_tile_1[2][4], final_tile[2][2];
      // const float At[2][4] {
      // {1.0f, 1.0f, 1.0f, 0.0f},
      // {0.0f, 1.0f, -1.0f, -1.0f}
      // }
      // At * I
      for (int j = 0; j < 4; j++) {
        tmp_tile_1[0][j] = multiplied_tile[0][j] + multiplied_tile[1][j] + multiplied_tile[2][j];
        tmp_tile_1[1][j] = multiplied_tile[1][j] - multiplied_tile[2][j] - multiplied_tile[3][j];
      }
      // I * A
      for (int i = 0; i < 2; i++) {
        final_tile[i][0] = tmp_tile_1[i][0] + tmp_tile_1[i][1] + tmp_tile_1[i][2];
        final_tile[i][1] = tmp_tile_1[i][1] - tmp_tile_1[i][2] - tmp_tile_1[i][3];
      }
      // Scatter the 2x2 tile, skipping cells past the edge when
      // out_map_size is odd.
      for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 2; j++) {
          int x = 2 * tile_i + i;
          int y = 2 * tile_j + j;
          if (x >= out_map_size || y >= out_map_size) {
            continue;
          }
          output[x * out_map_size + y] = final_tile[i][j];
        }
      }
    } // for tile_j
  } // for tile_i
}
int main(int argc, char* argv[]) {
if (argc != 2) {
printf("usage: ./WinogradConv2D <cpu offset>\n");
exit(0);
}
cpu_offset = atoi(argv[1]);
double t_start, t_end;
int i;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* B_outputFromGpu;
DATA_TYPE* C;
A = (DATA_TYPE*)malloc(N * N * sizeof(DATA_TYPE));
B = (DATA_TYPE*)malloc((N - 2) * (N - 2) * sizeof(DATA_TYPE));
B_outputFromGpu = (DATA_TYPE*)malloc((N - 2) * (N - 2) * sizeof(DATA_TYPE));
C = (DATA_TYPE*)malloc(4 * 4 * sizeof(DATA_TYPE));
WinogradConv2D_2x2_filter_transformation(C);
init(A);
read_cl_file();
cl_initialization();
t_start = rtclock();
cl_mem_init(A, C);
cl_load_prog();
for (int i = 0; i < 3; i++) {
cl_launch_kernel();
}
errcode = clEnqueueReadBuffer(clCommandQue, b_mem_obj, CL_TRUE, 0, (N - 2) * (N - 2) * sizeof(DATA_TYPE), B_outputFromGpu, 0, NULL, NULL);
if (errcode != CL_SUCCESS)
printf("Error in reading GPU mem\n");
cl_clean_up();
t_end = rtclock();
// printf("Total kernel time: %lf\n", total_time);
printf("CPU offset: %d\n", cpu_offset);
printf("Total time: %lf ms\n", 1000.0 * (t_end - t_start));
WinogradConv2D_2x2(A, B, C);
compareResults(B, B_outputFromGpu);
free(A);
free(B);
free(B_outputFromGpu);
free(C);
return 0;
}
|
core_slantr.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zlantr.c, normal z -> s, Fri Sep 28 17:38:21 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"
#include <math.h>
/******************************************************************************/
// Computes the requested norm (max/one/inf/Frobenius, per `norm`) of an
// m-by-n triangular matrix A (triangle selected by `uplo`, unit/non-unit
// diagonal per `diag`), storing the scalar result in *value.
// `work` is scratch space required by LAPACK for the infinity norm.
// Declared weak so an optimized implementation can override it at link time.
__attribute__((weak))
void plasma_core_slantr(plasma_enum_t norm, plasma_enum_t uplo, plasma_enum_t diag,
                        int m, int n,
                        const float *A, int lda,
                        float *work, float *value)
{
    // Due to a bug in LAPACKE < 3.6.1, this function always returns zero.
    // *value = LAPACKE_slantr_work(LAPACK_COL_MAJOR,
    //                              lapack_const(norm), lapack_const(uplo),
    //                              lapack_const(diag),
    //                              m, n, A, lda, work);
    // Calling LAPACK directly instead: the Fortran interface takes every
    // argument by reference, hence the local char/int lvalues.
    char nrm = lapack_const(norm);
    char upl = lapack_const(uplo);
    char dia = lapack_const(diag);
    *value = LAPACK_slantr(&nrm, &upl, &dia, &m, &n, A, &lda, work);
}
/******************************************************************************/
// OpenMP-task wrapper around plasma_core_slantr: the norm computation runs
// as a task that depends on A (in) and produces *value (out), so it is
// sequenced correctly within PLASMA's task graph.
// The task is a no-op if an earlier task already failed the sequence.
void plasma_core_omp_slantr(plasma_enum_t norm, plasma_enum_t uplo, plasma_enum_t diag,
                            int m, int n,
                            const float *A, int lda,
                            float *work, float *value,
                            plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(in:A[0:lda*n]) \
                     depend(out:value[0:1])
    {
        if (sequence->status == PlasmaSuccess)
            plasma_core_slantr(norm, uplo, diag, m, n, A, lda, work, value);
    }
}
/******************************************************************************/
// Auxiliary kernel for tile-level triangular norms: instead of a scalar, it
// fills `value` with per-column absolute sums (one norm, value[0:n]) or
// per-row absolute sums (infinity norm, value[0:m]) of the m-by-n triangular
// tile A, so a reduction step can combine tiles later.
// For a unit diagonal the diagonal entries contribute 1.0 instead of |a_jj|.
// NOTE(review): norms other than PlasmaOneNorm/PlasmaInfNorm fall through
// the switch silently and leave `value` untouched — presumably the caller
// only ever passes these two; TODO confirm.
void plasma_core_omp_slantr_aux(plasma_enum_t norm, plasma_enum_t uplo,
                                plasma_enum_t diag,
                                int m, int n,
                                const float *A, int lda,
                                float *value,
                                plasma_sequence_t *sequence, plasma_request_t *request)
{
    switch (norm) {
    case PlasmaOneNorm:
        // value[j] = sum of |A(i,j)| over the stored triangle of column j.
        #pragma omp task depend(in:A[0:lda*n]) \
                         depend(out:value[0:n])
        {
            if (sequence->status == PlasmaSuccess) {
                if (uplo == PlasmaUpper) {
                    if (diag == PlasmaNonUnit) {
                        // Column j holds rows 0..min(j, m-1).
                        for (int j = 0; j < n; j++) {
                            value[j] = fabsf(A[lda*j]);
                            for (int i = 1; i < imin(j+1, m); i++) {
                                value[j] += fabsf(A[lda*j+i]);
                            }
                        }
                    }
                    else { // PlasmaUnit
                        // Columns containing a diagonal entry: count it as 1.
                        int j;
                        for (j = 0; j < imin(n, m); j++) {
                            value[j] = 1.0;
                            for (int i = 0; i < j; i++) {
                                value[j] += fabsf(A[lda*j+i]);
                            }
                        }
                        // Columns past the diagonal: full column, no unit entry.
                        for (; j < n; j++) {
                            value[j] = fabsf(A[lda*j]);
                            for (int i = 1; i < m; i++) {
                                value[j] += fabsf(A[lda*j+i]);
                            }
                        }
                    }
                }
                else { // PlasmaLower
                    if (diag == PlasmaNonUnit) {
                        // Column j holds rows j..m-1; columns beyond the
                        // diagonal are empty in the lower triangle.
                        int j;
                        for (j = 0; j < imin(n, m); j++) {
                            value[j] = fabsf(A[lda*j+j]);
                            for (int i = j+1; i < m; i++) {
                                value[j] += fabsf(A[lda*j+i]);
                            }
                        }
                        for (; j < n; j++)
                            value[j] = 0.0;
                    }
                    else { // PlasmaUnit
                        int j;
                        for (j = 0; j < imin(n, m); j++) {
                            value[j] = 1.0;
                            for (int i = j+1; i < m; i++) {
                                value[j] += fabsf(A[lda*j+i]);
                            }
                        }
                        for (; j < n; j++)
                            value[j] = 0.0;
                    }
                }
            }
        }
        break;
    case PlasmaInfNorm:
        // value[i] = sum of |A(i,j)| over the stored triangle of row i.
        #pragma omp task depend(in:A[0:lda*n]) \
                         depend(out:value[0:m])
        {
            if (sequence->status == PlasmaSuccess) {
                if (uplo == PlasmaUpper) {
                    if (diag == PlasmaNonUnit) {
                        for (int i = 0; i < m; i++)
                            value[i] = 0.0;
                        for (int j = 0; j < n; j++) {
                            for (int i = 0; i < imin(j+1, m); i++) {
                                value[i] += fabsf(A[lda*j+i]);
                            }
                        }
                    }
                    else { // PlasmaUnit
                        // Rows that meet the diagonal start at 1, others at 0.
                        int i;
                        for (i = 0; i < imin(m, n); i++)
                            value[i] = 1.0;
                        for (; i < m; i++)
                            value[i] = 0.0;
                        // Strictly-upper part of columns up to the diagonal...
                        int j;
                        for (j = 0; j < imin(n, m); j++) {
                            for (i = 0; i < j; i++) {
                                value[i] += fabsf(A[lda*j+i]);
                            }
                        }
                        // ...then the full remaining columns.
                        for (; j < n; j++) {
                            for (i = 0; i < m; i++) {
                                value[i] += fabsf(A[lda*j+i]);
                            }
                        }
                    }
                }
                else { // PlasmaLower
                    if (diag == PlasmaNonUnit) {
                        for (int i = 0; i < m; i++)
                            value[i] = 0.0;
                        for (int j = 0; j < imin(n, m); j++) {
                            for (int i = j; i < m; i++) {
                                value[i] += fabsf(A[lda*j+i]);
                            }
                        }
                    }
                    else { // PlasmaUnit
                        int i;
                        for (i = 0; i < imin(m, n); i++)
                            value[i] = 1.0;
                        for (; i < m; i++)
                            value[i] = 0.0;
                        // Strictly-lower entries only; diagonal already
                        // accounted for as 1.0.
                        for (int j = 0; j < imin(n, m); j++) {
                            for (i = j+1; i < m; i++) {
                                value[i] += fabsf(A[lda*j+i]);
                            }
                        }
                    }
                }
            }
        }
        break;
    }
}
|
omp_fdtd-2d.c | #include "fdtd-2d.h"
#include <omp.h>
double bench_t_start, bench_t_end;
static
// Returns the current wall-clock time in seconds (microsecond resolution)
// via gettimeofday. On failure the message is printed and the (then
// indeterminate) timeval is still converted — callers here only use it for
// elapsed-time deltas.
double rtclock(){
  struct timeval Tp;
  int stat;
  stat = gettimeofday(&Tp, NULL);
  if(stat != 0)
    printf("Error return from gettimeofday: %d\n", stat); // newline was missing, garbling subsequent output
  return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
}
// Records the benchmark start time using OpenMP's wall clock.
void bench_timer_start(){
  bench_t_start = omp_get_wtime();
}
// Records the benchmark stop time using OpenMP's wall clock.
void bench_timer_stop(){
  bench_t_end = omp_get_wtime();
}
// Prints the elapsed time between bench_timer_start() and bench_timer_stop().
void bench_timer_print(){
  printf("Time in seconds = %0.6lf\n", bench_t_end - bench_t_start);
}
static
// Fills the FDTD grids with deterministic synthetic initial conditions:
// the source waveform _fict_ is a ramp, and ex/ey/hz get simple
// position-dependent values so runs are reproducible.
void init_array(int tmax,
                int nx,
                int ny,
                float ex[nx][ny],
                float ey[nx][ny],
                float hz[nx][ny],
                float _fict_[tmax]){
  for(int t = 0; t < tmax; t++){
    _fict_[t] = (float) t;
  }
  for(int row = 0; row < nx; row++){
    for(int col = 0; col < ny; col++){
      ex[row][col] = ((float) row * (col + 1)) / nx;
      ey[row][col] = ((float) row * (col + 2)) / ny;
      hz[row][col] = ((float) row * (col + 3)) / nx;
    }
  }
}
static
// Dumps the three grids to stderr in the PolyBench dump format (20 values
// per line) so results can be diffed between runs/implementations.
// NOTE(review): the line-break index uses (i * nx + j) for all three arrays;
// presumably (i * ny + j) was meant — harmless while NX == NY, formatting
// only either way. TODO confirm against the PolyBench reference.
void print_array(int nx,
                 int ny,
                 float ex[nx][ny],
                 float ey[nx][ny],
                 float hz[nx][ny]){
  int i, j;
  fprintf(stderr, "==BEGIN DUMP_ARRAYS==\n");
  fprintf(stderr, "begin dump: %s", "ex");
  for(i = 0; i < nx; i++)
    for(j = 0; j < ny; j++){
      if((i * nx + j) % 20 == 0) fprintf(stderr, "\n");
      fprintf(stderr, "%0.2f ", ex[i][j]);
    }
  fprintf(stderr, "\nend dump: %s\n", "ex");
  fprintf(stderr, "==END DUMP_ARRAYS==\n");
  fprintf(stderr, "begin dump: %s", "ey");
  for(i = 0; i < nx; i++)
    for(j = 0; j < ny; j++){
      if((i * nx + j) % 20 == 0) fprintf(stderr, "\n");
      fprintf(stderr, "%0.2f ", ey[i][j]);
    }
  fprintf(stderr, "\nend dump: %s\n", "ey");
  fprintf(stderr, "begin dump: %s", "hz");
  for(i = 0; i < nx; i++)
    for(j = 0; j < ny; j++){
      if((i * nx + j) % 20 == 0) fprintf(stderr, "\n");
      fprintf(stderr, "%0.2f ", hz[i][j]);
    }
  fprintf(stderr, "\nend dump: %s\n", "hz");
}
static
// 2-D FDTD time-stepping kernel. Each time step updates, in order:
// the ey boundary row from the source waveform, then ey, ex, and hz field
// sweeps. The single parallel region keeps the thread team alive across all
// tmax steps; the implicit barrier at the end of each `omp for` enforces the
// ey -> ex -> hz ordering that the stencil dependencies require.
void kernel_fdtd_2d(int tmax,
                    int nx,
                    int ny,
                    float ex[nx][ny],
                    float ey[nx][ny],
                    float hz[nx][ny],
                    const float _fict_[tmax]){
  int t, i, j;
  #pragma omp parallel private(t, i, j)
  {
    for(t = 0; t < tmax; t++){
      // Drive the top boundary of ey with the source waveform for step t.
      #pragma omp for
      for(j = 0; j < ny; j++)
        ey[0][j] = _fict_[t];
      // Update ey from the hz values of the previous step.
      #pragma omp for collapse(2)
      for(i = 1; i < nx; i++)
        for(j = 0; j < ny; j++)
          ey[i][j] = ey[i][j] - 0.5f * (hz[i][j] - hz[i - 1][j]);
      // Update ex from the hz values of the previous step.
      #pragma omp for collapse(2)
      for(i = 0; i < nx; i++)
        for(j = 1; j < ny; j++)
          ex[i][j] = ex[i][j] - 0.5f * (hz[i][j] - hz[i][j - 1]);
      // Update hz from the freshly computed ex/ey (barrier above ensures
      // both sweeps are complete).
      #pragma omp for collapse(2)
      for(i = 0; i < nx - 1; i++)
        for(j = 0; j < ny - 1; j++)
          hz[i][j] = hz[i][j] - 0.7f * (ex[i][j + 1] - ex[i][j] + ey[i + 1][j] - ey[i][j]);
    }
  }
}
int main(int argc, char **argv){
int tmax = TMAX;
int nx = NX;
int ny = NY;
float (*ex)[nx][ny];
ex = (float (*)[nx][ny]) malloc((nx) * (ny) * sizeof(float));
float (*ey)[nx][ny];
ey = (float (*)[nx][ny]) malloc((nx) * (ny) * sizeof(float));
float (*hz)[nx][ny];
hz = (float (*)[nx][ny]) malloc((nx) * (ny) * sizeof(float));
float (*_fict_)[tmax];
_fict_ = (float (*)[tmax]) malloc((tmax) * sizeof(float));
//int num_threads = argc > 1 ? atoi(argv[1]) : 1;
//printf("num_threads: %d\n", num_threads);
//omp_set_num_threads(num_threads);
init_array(tmax, nx, ny,
*ex,
*ey,
*hz,
*_fict_);
bench_timer_start();
kernel_fdtd_2d(tmax, nx, ny,
*ex,
*ey,
*hz,
*_fict_);
//omp_get_max_threads()
bench_timer_stop();
printf("==========================\n");
printf("THREADS: %d\n", omp_get_max_threads());
bench_timer_print();
printf("==========================\n\n");
if(argc > 42 && !strcmp(argv[0], "")) print_array(nx, ny, *ex, *ey, *hz);
free((void *) ex);
free((void *) ey);
free((void *) hz);
free((void *) _fict_);
return 0;
}
|
08_global_loop_atomic.c | /*THIS PROGRAM DOESN'T WORK PROPERLY*/
#include <stdio.h>
#include <omp.h>
#define MAX_ITS 10000
// Demonstrates protecting a shared counter with an atomic update.
int main(){
  int its_global, i;
  its_global = 0;
  #pragma omp parallel for
  for (i=0;i<MAX_ITS;++i){
    /* The atomic construct makes the increment of the shared counter
       indivisible. Per the OpenMP specification, '#pragma omp atomic' must
       be followed by a single expression statement; the original wrapped
       the statement in braces, which is invalid OpenMP and fails to compile
       with -fopenmp enabled (the reason this program "didn't work"). */
    #pragma omp atomic
    its_global++;
  }
  printf("Counter records %i iterations\n", its_global);
  return 0;
}
|
file.c | /**
* \file file.c
* \brief nfdump file loading, handling and saving functions
*
* \author J.R.Versteegh <j.r.versteegh@orca-st.com>
*
* \copyright
* (C) 2017 Jaap Versteegh. All rights reserved.
* (C) 2017 SURFnet. All rights reserved.
* \license
* This software may be modified and distributed under the
* terms of the BSD license. See the LICENSE file for details.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "utils.h"
#include "compress.h"
#include "file.h"
// Private functions
static int _read_block(FILE *f, nf_block_t* block);
static int _write_block(FILE *f, nf_block_t* block);
static int _blocks_status(const nf_file_p file);
static void _handle_free_block(int blocknum, nf_block_p block);
nf_file_p file_new()
{
return (nf_file_p)calloc(1, sizeof(nf_file_t));
}
// Loads an nfdump file: reads the file header and stats, then every block
// sequentially (the master thread), optionally dispatching each loaded
// block to handle_block as an OpenMP task for concurrent processing.
// Returns the populated file object, or NULL on any error (all partial
// allocations are released via file_free).
// Ownership: the returned object and all its blocks belong to the caller
// and must be released with file_free().
nf_file_p file_load(const char* filename, block_handler_p handle_block) {
  nf_file_p fl = file_new();
  if (fl == NULL) {
    msg(log_error, "Failed to allocate file buffer\n");
    return NULL;
  }
  msg(log_info, "Reading %s\n", filename);
  FILE *f = fopen(filename, "rb");
  if (!f) {
    msg(log_error, "Failed to open: %s\n", filename);
    goto failure;
  }
  size_t bytes_read = fread(&fl->header, 1, sizeof(fl->header), f);
  if (bytes_read != sizeof(fl->header)) {
    msg(log_error, "Failed to read file header\n");
    goto failure;
  }
  msg(log_debug, "Read file header\n");
  bytes_read = fread(&fl->stats, 1, sizeof(fl->stats), f);
  if (bytes_read != sizeof(fl->stats)) {
    msg(log_error, "Failed to read file stats\n");
    goto failure;
  }
  msg(log_debug, "Read file stats\n");
  // Grow the file object to hold the block pointer array announced by the
  // header (the array is a flexible tail of nf_file_t).
  size_t blocks_size = fl->header.NumBlocks * sizeof(nf_block_p);
  nf_file_p new_fl = (nf_file_p)realloc(fl, sizeof(nf_file_t) + blocks_size);
  if (new_fl == NULL) {
    msg(log_error, "Failed to re-allocate file buffer\n");
    goto failure;
  }
  fl = new_fl;
  memset(&fl->blocks, 0, blocks_size);
  // Map the header's compression flag bits to the compression enum;
  // first matching flag wins.
  compression_t file_compression =
      fl->header.flags & FLAG_LZO_COMPRESSED ? compressed_lzo :
      fl->header.flags & FLAG_BZ2_COMPRESSED ? compressed_bz2 :
      fl->header.flags & FLAG_LZ4_COMPRESSED ? compressed_lz4 :
      fl->header.flags & FLAG_LZMA_COMPRESSED ? compressed_lzma :
      compressed_none;
  msg(log_info, "File compression: %d flags: %u\n", file_compression, fl->header.flags);
  int blocks_read = 0;
  // Only the master thread runs the read loop (file I/O stays sequential);
  // worker threads pick up the handle_block tasks spawned below.
  #pragma omp parallel
  #pragma omp master
  for (;;) {
    nf_block_p block = block_new();
    if (block == NULL) {
      msg(log_error, "Failed to allocate block buffer\n");
      break;
    }
    if (_read_block(f, block) != 0) {
      // Normal loop exit: _read_block fails quietly at clean end-of-file.
      free(block);
      break;
    }
    int block_idx = blocks_read++;
    // More blocks on disk than the header declared: grow the array and fix
    // up the header count.
    if (blocks_read > fl->header.NumBlocks) {
      blocks_size = blocks_read * sizeof(nf_block_p);
      new_fl = (nf_file_p)realloc(fl, sizeof(nf_file_t) + blocks_size);
      if (new_fl == NULL) {
        msg(log_error, "Failed to re-allocate file buffer\n");
        break;
      }
      fl = new_fl;
      msg(log_info, "Fixed block count in header. found %d, header %d\n", blocks_read, fl->header.NumBlocks);
      fl->header.NumBlocks = blocks_read;
    }
    fl->blocks[block_idx] = block;
    // Catalog blocks are not compressed
    block->compression = block->header.id == CATALOG_BLOCK ? compressed_none : file_compression;
    block->file_compression = block->compression;
    size_t size = block->header.size;
    block->compressed_size = size;
    block->uncompressed_size = size;
    if (handle_block != NULL) {
      // Hand the block to the caller asynchronously; firstprivate pins the
      // index/pointer values at task-creation time.
      #pragma omp task firstprivate(block_idx, block)
      handle_block(block_idx, block);
    }
  }
  if (blocks_read < fl->header.NumBlocks) {
    msg(log_error, "Missing blocks in file. found %d, expected %d\n", blocks_read, fl->header.NumBlocks);
    goto failure;
  }
  if (_blocks_status(fl) < 0) {
    msg(log_error, "One or more blocks failed to load properly\n");
    goto failure;
  }
  fl->size = ftell(f);
  fclose(f);
  return fl;
failure:
  if (f)
    fclose(f);
  file_free(&fl);
  return NULL;
}
// Releases a file object and all of its blocks. The caller's pointer is
// nulled before the teardown, so a duplicate call is a harmless no-op.
void file_free(nf_file_p *file) {
  nf_file_p doomed = *file;
  if (doomed == NULL)
    return;
  *file = NULL;
  file_for_each_block(doomed, &_handle_free_block);
  free(doomed);
}
// Invokes handle_block on every block of the file, in parallel across
// OpenMP threads (so handle_block must be safe to call concurrently for
// distinct blocks). Returns the worst block status afterwards (0 = all OK).
int file_for_each_block(const nf_file_p file, block_handler_p handle_block) {
  #pragma omp parallel for
  for (int i = 0; i < file->header.NumBlocks; ++i) {
    handle_block(i, file->blocks[i]);
  }
  return _blocks_status(file);
}
// Returns the most negative status found across all blocks
// (0 when every block is healthy).
static int _blocks_status(const nf_file_p file) {
  int worst = 0;
  for (int i = 0; i < file->header.NumBlocks; ++i) {
    int status = file->blocks[i]->status;
    if (status < worst)
      worst = status;
  }
  return worst;
}
// Saves the file in place. The original body was empty: a non-void function
// falling off its end is undefined behavior as soon as a caller reads the
// result. In-place saving is not implemented yet, so fail explicitly.
// TODO: delegate to file_save_as(file, file->name) once a prototype for
// file_save_as is visible above this definition.
int file_save(const nf_file_p file) {
  (void)file;
  return -1;
}
// Writes the file object (header, stats, then all blocks) to `filename`,
// updating file->name on success. Returns 0 on success, -1 on any failure.
// The compression flag in the header is rewritten to match the compression
// of the first block.
int file_save_as(nf_file_p file, const char* filename) {
  FILE *f = NULL;  // initialized so the failure path can safely close it
  msg(log_info, "Writing %s\n", filename);
  if (file->header.NumBlocks == 0) {
    msg(log_error, "Not saving empty file\n");
    return -1;
  }
  compression_t file_compression = file->blocks[0]->compression;
  // Switch off all compression flags...
  for (compression_t cmpr = compressed_none; cmpr < compressed_term; ++cmpr) {
    file->header.flags &= ~compression_flags[cmpr];
  }
  // ... and then select the compression method of the first block as compression type
  file->header.flags |= compression_flags[file_compression];
  msg(log_info, "File compression: %d flags: %u\n", file_compression, file->header.flags);
  f = fopen(filename, "wb");
  if (!f) {
    msg(log_error, "Failed to open: %s\n", filename);
    goto failure;
  }
  size_t bytes_written = fwrite(&file->header, 1, sizeof(file->header), f);
  if (bytes_written != sizeof(file->header)) {
    msg(log_error, "Failed to write file header\n");
    goto failure;
  }
  msg(log_debug, "Written file header\n");
  bytes_written = fwrite(&file->stats, 1, sizeof(file->stats), f);
  if (bytes_written != sizeof(file->stats)) {
    msg(log_error, "Failed to write file stats\n");
    goto failure;
  }
  msg(log_debug, "Written file stats\n");
  for (int i = 0; i < file->header.NumBlocks; ++i) {
    int result = _write_block(f, file->blocks[i]);
    if (result != 0)
      goto failure;
  }
  free(file->name);
  file->name = strdup(filename);
  fclose(f);
  return 0;
failure:
  // The original leaked the stream on every failure after fopen succeeded.
  if (f)
    fclose(f);
  return -1;
}
// Reads one block (header + payload) from the stream into `block`.
// Returns 0 on success; -1 on failure, with block->data freed/NULLed,
// size zeroed and status set to -1. A zero-byte header read (clean EOF)
// fails silently — file_load relies on this as its loop terminator.
// NOTE(review): the failure path frees block->data even when the header
// read failed before data was allocated — assumes block_new() zero-
// initializes the block so the pointer is NULL there; TODO confirm.
static int _read_block(FILE *f, nf_block_t* block) {
  size_t bytes_read = fread(&block->header, 1, sizeof(block->header), f);
  if (bytes_read != sizeof(block->header)) {
    // Only whine when not immediately at end of file.
    if (bytes_read != 0)
      msg(log_error, "Failed to read block header\n");
    goto failure;
  }
  block->data = (char*)malloc(block->header.size);
  if (block->data == NULL) {
    msg(log_error, "Failed to allocate block data\n");
    goto failure;
  }
  bytes_read = fread(block->data, 1, block->header.size, f);
  if (bytes_read != block->header.size) {
    msg(log_error, "Failed to read block data\n");
    goto failure;
  }
  block->status = 0;
  return 0;
failure:
  free(block->data);
  block->data = NULL;
  block->header.size = 0;
  block->status = -1;
  return -1;
}
// Serializes one block to the stream: header first, then the payload.
// Refuses to write a block whose status is non-zero.
// Returns 0 on success, -1 on any failure.
static int _write_block(FILE *f, nf_block_t* block) {
  if (block->status != 0) {
    msg(log_error, "Invalid block\n");
    return -1;
  }
  if (fwrite(&block->header, 1, sizeof(block->header), f) != sizeof(block->header)) {
    msg(log_error, "Failed to write block header\n");
    return -1;
  }
  if (fwrite(block->data, 1, block->header.size, f) != block->header.size) {
    msg(log_error, "Failed to write block data\n");
    return -1;
  }
  return 0;
}
// block_handler_p adapter used by file_free: releases one block
// (blocknum is unused but required by the callback signature).
static void _handle_free_block(int blocknum, nf_block_p block) {
  block_free(&block);
}
|
declare_simd_aarch64.c | // REQUIRES: aarch64-registered-target
// -fopenmp and -fopenmp-simd behavior are expected to be the same.
// RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +neon -fopenmp -x c -emit-llvm %s -o - -femit-all-decls | FileCheck %s --check-prefix=AARCH64
// RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +neon -fopenmp-simd -x c -emit-llvm %s -o - -femit-all-decls | FileCheck %s --check-prefix=AARCH64
#pragma omp declare simd
#pragma omp declare simd simdlen(2)
#pragma omp declare simd simdlen(6)
#pragma omp declare simd simdlen(8)
double foo(float x);
// AARCH64: "_ZGVnM2v_foo" "_ZGVnM4v_foo" "_ZGVnM8v_foo" "_ZGVnN2v_foo" "_ZGVnN4v_foo" "_ZGVnN8v_foo"
// AARCH64-NOT: _ZGVnN6v_foo
// Consumes foo() in a vectorizable loop so the declare-simd variants
// declared above are actually emitted and checked.
void foo_loop(double *x, float *y, int N) {
  for (int i = 0; i < N; ++i) {
    x[i] = foo(y[i]);
  }
}
// make sure that the following two functions by default get generated
// with 4 and 2 lanes, as described in the vector ABI
#pragma omp declare simd notinbranch
float bar(double x);
#pragma omp declare simd notinbranch
double baz(float x);
// AARCH64: "_ZGVnN2v_baz" "_ZGVnN4v_baz"
// AARCH64-NOT: baz
// AARCH64: "_ZGVnN2v_bar" "_ZGVnN4v_bar"
// AARCH64-NOT: bar
// Consumes bar() and baz() so their default-simdlen vector variants are
// emitted for the mangled-name checks above.
void baz_bar_loop(double *x, float *y, int N) {
  for (int i = 0; i < N; ++i) {
    x[i] = baz(y[i]);
    y[i] = bar(x[i]);
  }
}
/***************************/
/* 32-bit integer tests */
/***************************/
#pragma omp declare simd
#pragma omp declare simd simdlen(2)
#pragma omp declare simd simdlen(6)
#pragma omp declare simd simdlen(8)
long foo_int(int x);
// AARCH64: "_ZGVnN2v_foo_int" "_ZGVnN4v_foo_int" "_ZGVnN8v_foo_int"
// No non power of two
// AARCH64-NOT: _ZGVnN6v_foo_int
// Consumes foo_int() so its integer declare-simd variants are emitted.
void foo_int_loop(long *x, int *y, int N) {
  for (int i = 0; i < N; ++i) {
    x[i] = foo_int(y[i]);
  }
}
#pragma omp declare simd
char simple_8bit(char);
// AARCH64: "_ZGVnM16v_simple_8bit" "_ZGVnM8v_simple_8bit" "_ZGVnN16v_simple_8bit" "_ZGVnN8v_simple_8bit"
#pragma omp declare simd
short simple_16bit(short);
// AARCH64: "_ZGVnM4v_simple_16bit" "_ZGVnM8v_simple_16bit" "_ZGVnN4v_simple_16bit" "_ZGVnN8v_simple_16bit"
#pragma omp declare simd
int simple_32bit(int);
// AARCH64: "_ZGVnM2v_simple_32bit" "_ZGVnM4v_simple_32bit" "_ZGVnN2v_simple_32bit" "_ZGVnN4v_simple_32bit"
#pragma omp declare simd
long simple_64bit(long);
// AARCH64: "_ZGVnM2v_simple_64bit" "_ZGVnN2v_simple_64bit"
#pragma omp declare simd
#pragma omp declare simd simdlen(32)
char a01(int x);
// AARCH64: "_ZGVnN16v_a01" "_ZGVnN32v_a01" "_ZGVnN8v_a01"
// AARCH64-NOT: a01
#pragma omp declare simd
#pragma omp declare simd simdlen(2)
long a02(short x);
// AARCH64: "_ZGVnN2v_a02" "_ZGVnN4v_a02" "_ZGVnN8v_a02"
// AARCH64-NOT: a02
/************/
/* pointers */
/************/
#pragma omp declare simd
int b01(int *x);
// AARCH64: "_ZGVnN4v_b01"
// AARCH64-NOT: b01
#pragma omp declare simd
char b02(char *);
// AARCH64: "_ZGVnN16v_b02" "_ZGVnN8v_b02"
// AARCH64-NOT: b02
#pragma omp declare simd
double *b03(double *);
// AARCH64: "_ZGVnN2v_b03"
// AARCH64-NOT: b03
/***********/
/* masking */
/***********/
#pragma omp declare simd inbranch
int c01(double *x, short y);
// AARCH64: "_ZGVnM8vv_c01"
// AARCH64-NOT: c01
#pragma omp declare simd inbranch uniform(x)
double c02(double *x, char y);
// AARCH64: "_ZGVnM16uv_c02" "_ZGVnM8uv_c02"
// AARCH64-NOT: c02
/*************************/
/* sincos-like signature */
/*************************/
#pragma omp declare simd linear(sin) linear(cos)
void sincos(double in, double *sin, double *cos);
// AARCH64: "_ZGVnN2vll_sincos"
// AARCH64-NOT: sincos
#pragma omp declare simd linear(sin : 1) linear(cos : 2)
void SinCos(double in, double *sin, double *cos);
// AARCH64: "_ZGVnN2vll2_SinCos"
// AARCH64-NOT: SinCos
// Selection of tests based on the examples provided in chapter 5 of
// the Vector Function ABI specifications for AArch64, at
// https://developer.arm.com/products/software-development-tools/hpc/arm-compiler-for-hpc/vector-function-abi.
// Listing 2, p. 18
#pragma omp declare simd inbranch uniform(x) linear(val(i) : 4)
int foo2(int *x, int i);
// AARCH64: "_ZGVnM2ul4_foo2" "_ZGVnM4ul4_foo2"
// AARCH64-NOT: foo2
// Listing 3, p. 18
#pragma omp declare simd inbranch uniform(x, c) linear(i \
: c)
int foo3(int *x, int i, unsigned char c);
// AARCH64: "_ZGVnM16uls2u_foo3" "_ZGVnM8uls2u_foo3"
// AARCH64-NOT: foo3
// Listing 6, p. 19
#pragma omp declare simd linear(x) aligned(x : 16) simdlen(4)
int foo4(int *x, float y);
// AARCH64: "_ZGVnM4la16v_foo4" "_ZGVnN4la16v_foo4"
// AARCH64-NOT: foo4
static int *I;
static char *C;
static short *S;
static long *L;
static float *F;
static double *D;
// References every declare-simd function above exactly once so the
// compiler emits all their vector variants for FileCheck to match.
void do_something() {
  simple_8bit(*C);
  simple_16bit(*S);
  simple_32bit(*I);
  simple_64bit(*L);
  *C = a01(*I);
  *L = a02(*S);
  *I = b01(I);
  *C = b02(C);
  D = b03(D);
  *I = c01(D, *S);
  *D = c02(D, *S);
  sincos(*D, D, D);
  SinCos(*D, D, D);
  foo2(I, *I);
  foo3(I, *I, *C);
  foo4(I, *F);
}
typedef struct S {
char R, G, B;
} STy;
#pragma omp declare simd notinbranch
STy DoRGB(STy x);
// AARCH64: "_ZGVnN2v_DoRGB"
static STy *RGBData;
// References DoRGB() so its struct-argument vector variant is emitted.
void do_rgb_stuff() {
  DoRGB(*RGBData);
}
|
simulation.c | /* -------------------------------------------------------------------------- */
#include "io_binary.h"
/* -------------------------------------------------------------------------- */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
/* -------------------------------------------------------------------------- */
void allocate_grids(int n, float ***u, float ***uo, float ***f) {
  // Allocates three n x n grids. Each grid is one contiguous data slab plus
  // an array of row pointers into it, so grid[i][j] indexing works while
  // rows remain cache-contiguous. Pair with deallocate_grids().
  float *u_data = (float *)malloc(n * n * sizeof(float));
  float *uo_data = (float *)malloc(n * n * sizeof(float));
  float *f_data = (float *)malloc(n * n * sizeof(float));
  *u = (float **)malloc(n * sizeof(float *));
  *uo = (float **)malloc(n * sizeof(float *));
  *f = (float **)malloc(n * sizeof(float *));
  // Point each row at its slice of the contiguous slab.
  for (int row = 0; row < n; row++) {
    (*u)[row] = &u_data[row * n];
    (*uo)[row] = &uo_data[row * n];
    (*f)[row] = &f_data[row * n];
  }
}
void deallocate_grids(float ***uo, float ***u, float ***f) {
  // Releases grids created by allocate_grids: the contiguous data slab is
  // addressed by the first row pointer, then the row-pointer array itself.
  // Handles are nulled to guard against accidental reuse.
  float ***grids[3] = { u, uo, f };
  for (int g = 0; g < 3; g++) {
    free((*grids[g])[0]); // data slab
    free(*grids[g]);      // row pointers
    *grids[g] = NULL;
  }
}
void swap_grids(float ***uo, float ***u) {
  // Exchange the two grid handles — a pointer swap only, no data is copied.
  float **held = *u;
  *u = *uo;
  *uo = held;
}
void initialize_grids(int n, float **uo, float **u, float **f, float h) {
  // Zero both solution grids and fill f with the Poisson source term
  // f(x,y) = -2*(10*pi)^2 * sin(10*pi*x) * sin(10*pi*y), sampled on the
  // h-spaced grid.
  for (int row = 0; row < n; row++) {
    for (int col = 0; col < n; col++) {
      u[row][col] = 0;
      uo[row][col] = 0;
      f[row][col] = -2. * 100. * M_PI * M_PI * sin(10. * M_PI * row * h) *
                    sin(10. * M_PI * col * h);
    }
  }
}
// One Jacobi relaxation sweep over interior row i (columns 1..n-2),
// writing the new values into u and returning this row's contribution to
// the squared L2 norm of the update ||uo - u||^2.
static inline float compute_row(int i, int n, float **uo, float **u, float **f,
                                float h) {
  float row_l2 = 0.;
  for (int j = 1; j < n - 1; j++) {
    // Five-point stencil update from the previous iterate.
    u[i][j] = 0.25 * (uo[i - 1][j] + uo[i + 1][j] + uo[i][j - 1] +
                      uo[i][j + 1] - f[i][j] * h * h);
    float delta = uo[i][j] - u[i][j];
    row_l2 += delta * delta;
  }
  return row_l2;
}
// One full Jacobi step: relax every interior row in parallel and return the
// squared L2 norm of the change, accumulated via an OpenMP reduction.
float compute_step(int n, float **uo, float **u, float **f, float h) {
  float l2 = 0.;
  int row;
#pragma omp parallel for reduction(+:l2)
  for (row = 1; row < n - 1; row++) {
    l2 += compute_row(row, n, uo, u, f, h);
  }
  return l2;
}
// Iterates Jacobi steps until the squared update norm drops to epsilon or
// below, counting iterations in *k, then writes the final grid to disk.
// The uo/u swap operates on local copies of the handles, so the caller's
// pointers are untouched; after the final swap the local `uo` holds the
// most recent iterate, which is what gets written.
// Returns the last computed squared L2 norm.
float simulate(int n, float **uo, float **u, float **f, float h, float epsilon, int * k) {
  float l2 = 0.;
  *k = 0;
  do {
    l2 = compute_step(n, uo, u, f, h);
    // copy new grid in old grid
    swap_grids(&uo, &u);
    ++(*k);
  } while (l2 > epsilon);
  // write the new old grid to disk
  write_to_file(n, uo, (*k), -1., 1.);
  return l2;
}
|
alignblt.c | /********************************************************************[libaroma]*
* Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*______________________________________________________________________________
*
* Filename : alignblt.c
* Description : align blit
*
* + This is part of libaroma, an embedded ui toolkit.
* + 06/04/15 - Author(s): Ahmad Amarullah
*
*/
#ifndef __libaroma_alignblt_c__
#define __libaroma_alignblt_c__
#include <aroma_internal.h>
/*
 * Row-by-row copy of a w x h 16-bit (2 bytes/pixel) surface.
 * dst_stride/src_stride are the extra bytes of padding per row beyond the
 * w pixels, so each full row pitch is (w * 2) + stride bytes.
 */
void libaroma_blt_align16(wordp __restrict dst, wordp __restrict src,
    int w, int h, int dst_stride, int src_stride) {
  int y;
  int row_bytes = w << 1;                  /* payload bytes per row */
  int dst_pitch = row_bytes + dst_stride;  /* full destination row pitch */
  int src_pitch = row_bytes + src_stride;  /* full source row pitch */
  bytep dst_bytes = (bytep) dst;
  bytep src_bytes = (bytep) src;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
  for (y = 0; y < h; y++) {
    memcpy(dst_bytes + dst_pitch * y, src_bytes + src_pitch * y, row_bytes);
  }
}
/*
 * Converts a w x h 32-bit surface down to 16-bit, one row at a time via
 * libaroma_dither_line (presumably applies per-row dithering during the
 * depth reduction — TODO confirm against its definition).
 * Strides are padding bytes per row; >>1 / >>2 convert them to 16-bit and
 * 32-bit pixel counts respectively, so dline/sline are row pitches in
 * pixels of each surface's own depth.
 */
void libaroma_blt_align32_to16(wordp __restrict dst, dwordp __restrict src,
    int w, int h, int dst_stride, int src_stride) {
  int i;
  int dline = w+(dst_stride>>1);
  int sline = w+(src_stride>>2);
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
  for (i = 0; i < h; i++) {
    libaroma_dither_line(
      i, w, dst+dline*i, src+sline*i
    );
  }
}
/*
 * Expands a w x h 16-bit surface to 32-bit, one row at a time via
 * libaroma_btl32. Strides are padding bytes per row; >>2 / >>1 convert
 * them to 32-bit and 16-bit pixel counts, giving row pitches in pixels.
 */
void libaroma_blt_align16_to32(dwordp __restrict dst, wordp __restrict src,
    int w, int h, int dst_stride, int src_stride) {
  int i;
  int dline = w+(dst_stride>>2);
  int sline = w+(src_stride>>1);
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
  for (i = 0; i < h; i++) {
    libaroma_btl32(
      w,dst+dline*i,src+sline*i
    );
  }
}
/*
 * Copy a 32bpp surface row by row.
 * dst_stride / src_stride are the extra BYTES after each row of w pixels.
 */
void libaroma_blt_align32(dwordp __restrict dst, dwordp __restrict src,
    int w, int h, int dst_stride, int src_stride) {
  int y;
  int row_bytes = w * 4;               /* pixel bytes per row (32bpp) */
  int dpitch = row_bytes + dst_stride; /* full destination pitch in bytes */
  int spitch = row_bytes + src_stride; /* full source pitch in bytes */
  bytep dbase = (bytep) dst;
  bytep sbase = (bytep) src;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
  for (y = 0; y < h; y++) {
    memcpy(dbase + (dpitch * y), sbase + (spitch * y), row_bytes);
  }
}
/*
 * 16bpp -> 32bpp copy with an explicit RGB channel-position table.
 * NOTE(review): sibling libaroma_blt_align_to16_pos marks rgb_pos
 * __restrict; this one does not — confirm whether that is intentional.
 */
void libaroma_blt_align_to32_pos(dwordp __restrict dst, wordp __restrict src,
    int w, int h, int dst_stride, int src_stride,
    bytep rgb_pos) {
  int y;
  int dpitch = w + (dst_stride >> 2); /* dest pitch in 32-bit dwords */
  int spitch = w + (src_stride >> 1); /* source pitch in 16-bit words */
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
  for (y = 0; y < h; y++) {
    libaroma_color_copy32(dst + (dpitch * y), src + (spitch * y), w, rgb_pos);
  }
}
/*
 * 32bpp -> 16bpp copy with an explicit RGB channel-position table.
 */
void libaroma_blt_align_to16_pos(wordp __restrict dst, dwordp __restrict src,
    int w, int h, int dst_stride, int src_stride,
    bytep __restrict rgb_pos) {
  int y;
  int dpitch = w + (dst_stride >> 1); /* dest pitch in 16-bit words */
  int spitch = w + (src_stride >> 2); /* source pitch in 32-bit dwords */
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
  for (y = 0; y < h; y++) {
    libaroma_color_copy16(dst + (dpitch * y), src + (spitch * y), w, rgb_pos);
  }
}
#endif /* __libaroma_alignblt_c__ */
|
cryptocontext.h | /**
* @file cryptocontext.h -- Control for encryption operations.
* @author TPOC: contact@palisade-crypto.org
*
* @section LICENSE
*
* @copyright Copyright (c) 2019, New Jersey Institute of Technology (NJIT))
* All rights reserved.
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SRC_PKE_CRYPTOCONTEXT_H_
#define SRC_PKE_CRYPTOCONTEXT_H_
#include "palisade.h"
#include "scheme/allscheme.h"
#include "cryptocontexthelper.h"
#include "cryptotiming.h"
#include "utils/serial.h"
#include "utils/serialize-binary.h"
#include "utils/serialize-json.h"
namespace lbcrypto {
template<typename Element>
class CryptoContextFactory;
template<typename Element>
class CryptoContextImpl;
template<typename Element>
using CryptoContext = shared_ptr<CryptoContextImpl<Element>>;
/**
* @brief CryptoContextImpl
*
* A CryptoContextImpl is the object used to access the PALISADE library
*
* All PALISADE functionality is accessed by way of an instance of a CryptoContextImpl; we say that various objects are
* "created in" a context, and can only be used in the context in which they were created
*
* All PALISADE methods are accessed through CryptoContextImpl methods. Guards are implemented to make certain that
* only valid objects that have been created in the context are used
*
* Contexts are created using the CryptoContextFactory, and can be serialized and recovered from a serialization
*/
template<typename Element>
class CryptoContextImpl : public Serializable {
friend class CryptoContextFactory<Element>;
protected:
shared_ptr<LPCryptoParameters<Element>> params; /*!< crypto parameters used for this context */
shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme; /*!< algorithm used; accesses all crypto methods */
static std::map<string,std::vector<LPEvalKey<Element>>> evalMultKeyMap; /*!< cached evalmult keys, by secret key UID */
static std::map<string,shared_ptr<std::map<usint,LPEvalKey<Element>>>> evalSumKeyMap; /*!< cached evalsum keys, by secret key UID */
static std::map<string,shared_ptr<std::map<usint,LPEvalKey<Element>>>> evalAutomorphismKeyMap; /*!< cached evalautomorphism keys, by secret key UID */
bool doTiming;
vector<TimingInfo>* timeSamples;
string m_schemeId;
size_t m_keyGenLevel;
/**
* TypeCheck makes sure that an operation between two ciphertexts is permitted
* @param a
* @param b
*/
void TypeCheck(ConstCiphertext<Element> a, ConstCiphertext<Element> b) const {
if( a == NULL || b == NULL )
PALISADE_THROW( type_error, "Null Ciphertext");
if( a->GetCryptoContext().get() != this )
PALISADE_THROW( type_error, "Ciphertext was not created in this CryptoContext");
if( a->GetCryptoContext() != b->GetCryptoContext() )
PALISADE_THROW( type_error, "Ciphertexts were not created in the same CryptoContext");
if( a->GetKeyTag() != b->GetKeyTag() )
PALISADE_THROW( type_error, "Ciphertexts were not encrypted with same keys" );
if( a->GetEncodingType() != b->GetEncodingType() ) {
stringstream ss;
ss << "Ciphertext encoding types " << a->GetEncodingType();
ss << " and " << b->GetEncodingType();
ss << " do not match";
PALISADE_THROW( type_error, ss.str() );
}
}
/**
* TypeCheck makes sure that an operation between two ciphertexts is permitted
 * This is intended for mutable methods, hence inputs are Ciphertext instead
* of ConstCiphertext.
*
* @param a
* @param b
*/
void TypeCheck(Ciphertext<Element> a, Ciphertext<Element> b) const {
if( a == NULL || b == NULL )
PALISADE_THROW( type_error, "Null Ciphertext");
if( a->GetCryptoContext().get() != this )
PALISADE_THROW( type_error, "Ciphertext was not created in this CryptoContext");
if( a->GetCryptoContext() != b->GetCryptoContext() )
PALISADE_THROW( type_error, "Ciphertexts were not created in the same CryptoContext");
if( a->GetKeyTag() != b->GetKeyTag() )
PALISADE_THROW( type_error, "Ciphertexts were not encrypted with same keys" );
if( a->GetEncodingType() != b->GetEncodingType() ) {
stringstream ss;
ss << "Ciphertext encoding types " << a->GetEncodingType();
ss << " and " << b->GetEncodingType();
ss << " do not match";
PALISADE_THROW( type_error, ss.str() );
}
}
/**
* TypeCheck makes sure that an operation between a ciphertext and a plaintext is permitted
* @param a
* @param b
*/
void TypeCheck(ConstCiphertext<Element> a, ConstPlaintext b) const {
if( a == NULL )
PALISADE_THROW( type_error, "Null Ciphertext");
if( b == NULL )
PALISADE_THROW( type_error, "Null Plaintext");
if( a->GetCryptoContext().get() != this )
PALISADE_THROW( type_error, "Ciphertext was not created in this CryptoContext");
if( a->GetEncodingType() != b->GetEncodingType() ) {
stringstream ss;
ss << "Ciphertext encoding type " << a->GetEncodingType();
ss << " and Plaintext encoding type " << b->GetEncodingType();
ss << " do not match";
PALISADE_THROW( type_error, ss.str() );
}
}
/**
* TypeCheck makes sure that an operation between two ciphertexts is permitted
* @param a
* @param b
*/
void TypeCheck(const RationalCiphertext<Element>& a, const RationalCiphertext<Element>& b) const {
if( a.GetCryptoContext().get() != this )
PALISADE_THROW( type_error, "Ciphertext was not created in this CryptoContextImpl");
if( a.GetCryptoContext() != b.GetCryptoContext() )
PALISADE_THROW( type_error, "Ciphertexts were not created in the same CryptoContextImpl");
if( a.GetKeyTag() != b.GetKeyTag() )
PALISADE_THROW( type_error, "Ciphertexts were not encrypted with same keys" );
if( a.GetNumerator()->GetEncodingType() != b.GetNumerator()->GetEncodingType() ) {
stringstream ss;
ss << "RationalCiphertext encoding types " << a.GetNumerator()->GetEncodingType();
ss << " and " << b.GetNumerator()->GetEncodingType();
ss << " do not match";
PALISADE_THROW( type_error, ss.str() );
}
}
/**
* TypeCheck makes sure that an operation between a ciphertext and a plaintext is permitted
* @param a
* @param b
*/
void TypeCheck(const RationalCiphertext<Element>& a, ConstPlaintext b) const {
if( b == NULL )
PALISADE_THROW( type_error, "Null Plaintext");
if( a.GetCryptoContext().get() != this )
PALISADE_THROW( type_error, "Ciphertext was not created in this CryptoContextImpl");
if( a.GetNumerator()->GetEncodingType() != b->GetEncodingType() ){
stringstream ss;
ss << "RationalCiphertext encoding type " << a.GetNumerator()->GetEncodingType();
ss << " and Plaintext encoding type " << b->GetEncodingType();
ss << " do not match";
PALISADE_THROW( type_error, ss.str() );
}
}
bool Mismatched(const CryptoContext<Element> a) const {
if( a.get() != this ) {
return true;
}
return false;
}
public:
LPPrivateKey<Element> privateKey;
/**
* This stores the private key in the crypto context.
* This is only intended for debugging and should not be
* used in production systems. Please define DEBUG_KEY in
* palisade.h to enable this.
*
* If used, one can create a key pair and store the secret
 * key in the crypto context like this:
*
* auto keys = cc->KeyGen();
* cc->SetPrivateKey(keys.secretKey);
*
 * After that, anywhere in the code, one can access the
* secret key by getting the crypto context and doing the
* following:
*
* auto sk = cc->GetPrivateKey();
*
* This key can be used for decrypting any intermediate
* ciphertexts for debugging purposes.
*
* @param sk the secret key
*
*/
	void SetPrivateKey(const LPPrivateKey<Element> sk) {
#ifdef DEBUG_KEY
		// Deliberately loud: caching a secret key in the context is unsafe
		// outside of debugging builds.
		cerr << "Warning - SetPrivateKey is only intended to be used for debugging purposes - not for production systems." << endl;
		this->privateKey = sk;
#else
		// Without DEBUG_KEY this method is compiled as a hard failure.
		throw std::runtime_error("SetPrivateKey is only allowed if DEBUG_KEY is set in palisade.h");
#endif
	}
/**
* This gets the private key from the crypto context.
* This is only intended for debugging and should not be
* used in production systems. Please define DEBUG_KEY in
* palisade.h to enable this.
*
* If used, one can create a key pair and store the secret
 * key in the crypto context like this:
*
* auto keys = cc->KeyGen();
* cc->SetPrivateKey(keys.secretKey);
*
 * After that, anywhere in the code, one can access the
* secret key by getting the crypto context and doing the
* following:
*
* auto sk = cc->GetPrivateKey();
*
* This key can be used for decrypting any intermediate
* ciphertexts for debugging purposes.
*
* @return the secret key
*
*/
	const LPPrivateKey<Element> GetPrivateKey() {
#ifdef DEBUG_KEY
		// Returns the key previously stashed via SetPrivateKey.
		return this->privateKey;
#else
		// Without DEBUG_KEY this method is compiled as a hard failure.
		throw std::runtime_error("GetPrivateKey is only allowed if DEBUG_KEY is set in palisade.h");
#endif
	}
	/** Set the scheme identifier tag for this context. */
	void setSchemeId(string schemeTag) {
		this->m_schemeId = schemeTag;
	}
	/** @return the scheme identifier tag for this context */
	string getSchemeId() {
		return this->m_schemeId;
	}
/**
* CryptoContextImpl constructor from pointers to parameters and scheme
* @param params - pointer to CryptoParameters
* @param scheme - pointer to Crypto Scheme
*/
CryptoContextImpl(LPCryptoParameters<Element> *params = 0, LPPublicKeyEncryptionScheme<Element> *scheme = 0, const string & schemeId = "Not") {
this->params.reset(params);
this->scheme.reset(scheme);
this->doTiming = false;
this->timeSamples = 0;
this->m_keyGenLevel = 0;
this->m_schemeId = schemeId;
}
/**
* CryptoContextImpl constructor from shared pointers to parameters and scheme
* @param params - shared pointer to CryptoParameters
 * @param scheme - shared pointer to Crypto Scheme
*/
CryptoContextImpl(shared_ptr<LPCryptoParameters<Element>> params, shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme, const string & schemeId = "Not") {
this->params = params;
this->scheme = scheme;
this->doTiming = false;
this->timeSamples = 0;
this->m_keyGenLevel = 0;
this->m_schemeId = schemeId;
}
/**
* Copy constructor
* @param c - source
*/
	CryptoContextImpl(const CryptoContextImpl<Element>& c) {
		params = c.params;
		scheme = c.scheme;
		doTiming = c.doTiming;
		timeSamples = c.timeSamples;
		// NOTE(review): the copy constructor resets m_keyGenLevel to 0 while
		// operator= copies rhs.m_keyGenLevel — confirm this asymmetry is intended.
		this->m_keyGenLevel = 0;
		this->m_schemeId = c.m_schemeId;
	}
/**
* Assignment
* @param rhs - assigning from
* @return this
*/
CryptoContextImpl<Element>& operator=(const CryptoContextImpl<Element>& rhs) {
params = rhs.params;
scheme = rhs.scheme;
doTiming = rhs.doTiming;
timeSamples = rhs.timeSamples;
m_keyGenLevel = rhs.m_keyGenLevel;
m_schemeId = rhs.m_schemeId;
return *this;
}
	/**
	 * A CryptoContextImpl is only valid if both the parameter and the scheme
	 * shared pointers are non-null.
	 */
	operator bool() const { return bool(params) && bool(scheme); }
/**
* Private methods to compare two contexts; this is only used internally and is not generally available
* @param a - operand 1
* @param b - operand 2
* @return true if the implementations have identical parms and scheme
*/
friend bool operator==(const CryptoContextImpl<Element>& a, const CryptoContextImpl<Element>& b) {
// Identical if the parameters and the schemes are identical... the exact same object,
// OR the same type and the same values
if( a.params.get() == b.params.get() ) {
return true;
}
else {
if( typeid(*a.params.get()) != typeid(*b.params.get()) ) {
return false;
}
if( *a.params.get() != *b.params.get() )
return false;
}
if( a.scheme.get() == b.scheme.get() ) {
return true;
}
else {
if( typeid(*a.scheme.get()) != typeid(*b.scheme.get()) ) {
return false;
}
if( *a.scheme.get() != *b.scheme.get() )
return false;
}
return true;
}
	/** Inequality: simple negation of operator==. */
	friend bool operator!=(const CryptoContextImpl<Element>& a, const CryptoContextImpl<Element>& b) {
		return !( a == b );
	}
// TIMING METHODS
/**
* StartTiming method activates timing of CryptoMethods
*
* @param timeSamples points to a vector in which timing samples will be stored
*/
	void StartTiming(vector<TimingInfo>* timeSamples) {
		// Caller owns the vector; the context only appends to it.
		this->timeSamples = timeSamples;
		doTiming = true;
	}
	/*
	 * StopTiming - turns off timing (the samples vector is kept)
	 */
	void StopTiming() {
		doTiming = false;
	}
	/**
	 * ResumeTiming - re-enables timing with existing TimingInfo vector
	 */
	void ResumeTiming() {
		doTiming = true;
	}
	/**
	 * ResetTiming - erases measurements
	 */
	void ResetTiming() {
		// NOTE(review): dereferences timeSamples unconditionally — if called
		// before StartTiming, timeSamples is null; confirm callers never do that.
		this->timeSamples->clear();
	}
static bool SerializeEvalMultKey(Serialized* serObj) __attribute__ ((deprecated("serialization changed, see wiki for details")));
static bool SerializeEvalMultKey(Serialized* serObj, const string& id) __attribute__ ((deprecated("serialization changed, see wiki for details")));
static bool SerializeEvalMultKey(Serialized* serObj, const CryptoContext<Element> cc) __attribute__ ((deprecated("serialization changed, see wiki for details")));
static bool DeserializeEvalMultKey(Serialized* serObj) __attribute__ ((deprecated("serialization changed, see wiki for details")));
/**
* SerializeEvalMultKey for a single EvalMult key or all EvalMult keys
*
* @param ser - stream to serialize to
* @param sertype - type of serialization
* @param id for key to serialize - if empty string, serialize them all
* @return true on success
*/
template<typename ST>
static bool SerializeEvalMultKey(std::ostream& ser, const ST& sertype, string id = "") {
decltype(evalMultKeyMap) *smap;
decltype(evalMultKeyMap) omap;
if( id.length() == 0 )
smap = &evalMultKeyMap;
else {
auto k = evalMultKeyMap.find(id);
if( k == evalMultKeyMap.end() )
return false; // no such id
smap = &omap;
omap[ k->first ] = k->second;
}
Serial::Serialize(*smap, ser, sertype);
return true;
}
/**
* SerializeEvalMultKey for all EvalMultKeys made in a given context
*
* @param cc whose keys should be serialized
* @param ser - stream to serialize to
* @param sertype - type of serialization
* @return true on success (false on failure or no keys found)
*/
template<typename ST>
static bool SerializeEvalMultKey(std::ostream& ser, const ST& sertype, const CryptoContext<Element> cc) {
decltype(evalMultKeyMap) omap;
for( const auto& k : evalMultKeyMap ) {
if( k.second[0]->GetCryptoContext() == cc ) {
omap[k.first] = k.second;
}
}
if( omap.size() == 0 )
return false;
Serial::Serialize(omap, ser, sertype);
return true;
}
/**
* DeserializeEvalMultKey deserialize all keys in the serialization
* deserialized keys silently replace any existing matching keys
* deserialization will create CryptoContextImpl if necessary
*
* @param serObj - stream with a serialization
* @return true on success
*/
template<typename ST>
static bool DeserializeEvalMultKey(std::istream& ser, const ST& sertype) {
decltype(evalMultKeyMap) evalMultKeys;
Serial::Deserialize(evalMultKeys, ser, sertype);
// The deserialize call created any contexts that needed to be created.... so all we need to do
// is put the keys into the maps for their context
for( auto k : evalMultKeys ) {
evalMultKeyMap[ k.first ] = k.second;
}
return true;
}
/**
* ClearEvalMultKeys - flush EvalMultKey cache
*/
static void ClearEvalMultKeys();
/**
* ClearEvalMultKeys - flush EvalMultKey cache for a given id
* @param id
*/
static void ClearEvalMultKeys(const string& id);
/**
* ClearEvalMultKeys - flush EvalMultKey cache for a given context
* @param cc
*/
static void ClearEvalMultKeys(const CryptoContext<Element> cc);
/**
* InsertEvalMultKey - add the given vector of keys to the map, replacing the existing vector if there
* @param vectorToInsert
*/
static void InsertEvalMultKey(const std::vector<LPEvalKey<Element>>& vectorToInsert);
static bool SerializeEvalSumKey(Serialized* serObj) __attribute__ ((deprecated("serialization changed, see wiki for details")));
static bool SerializeEvalSumKey(Serialized* serObj, const string& id) __attribute__ ((deprecated("serialization changed, see wiki for details")));
static bool SerializeEvalSumKey(Serialized* serObj, const CryptoContext<Element> cc) __attribute__ ((deprecated("serialization changed, see wiki for details")));
static bool DeserializeEvalSumKey(const Serialized& serObj) __attribute__ ((deprecated("serialization changed, see wiki for details")));
/**
* SerializeEvalSumKey for a single EvalSum key or all of the EvalSum keys
*
* @param ser - stream to serialize to
* @param sertype - type of serialization
* @param id - key to serialize; empty string means all keys
* @return true on success
*/
template<typename ST>
static bool SerializeEvalSumKey(std::ostream& ser, const ST& sertype, string id = "") {
decltype(evalSumKeyMap)* smap;
decltype(evalSumKeyMap) omap;
if( id.length() == 0 )
smap = &evalSumKeyMap;
else {
auto k = evalSumKeyMap.find(id);
if( k == evalSumKeyMap.end() )
return false; // no such id
smap = &omap;
omap[ k->first ] = k->second;
}
Serial::Serialize(*smap, ser, sertype);
return true;
}
/**
* SerializeEvalSumKey for all of the EvalSum keys for a context
*
* @param ser - stream to serialize to
* @param sertype - type of serialization
* @param cc - context
* @return true on success
*/
template<typename ST>
static bool SerializeEvalSumKey(std::ostream& ser, const ST& sertype, const CryptoContext<Element> cc) {
decltype(evalSumKeyMap) omap;
for( const auto& k : evalSumKeyMap ) {
if( k.second->begin()->second->GetCryptoContext() == cc ) {
omap[k.first] = k.second;
}
}
if( omap.size() == 0 )
return false;
Serial::Serialize(omap, ser, sertype);
return true;
}
/**
* DeserializeEvalSumKey deserialize all keys in the serialization
* deserialized keys silently replace any existing matching keys
* deserialization will create CryptoContextImpl if necessary
*
* @param ser - stream to serialize from
* @param sertype - type of serialization
* @return true on success
*/
template<typename ST>
static bool DeserializeEvalSumKey(std::istream& ser, const ST& sertype) {
decltype(evalSumKeyMap) evalSumKeys;
Serial::Deserialize(evalSumKeys, ser, sertype);
// The deserialize call created any contexts that needed to be created.... so all we need to do
// is put the keys into the maps for their context
for( auto k : evalSumKeys ) {
evalSumKeyMap[ k.first ] = k.second;
}
return true;
}
/**
* ClearEvalSumKeys - flush EvalSumKey cache
*/
static void ClearEvalSumKeys();
/**
* ClearEvalSumKeys - flush EvalSumKey cache for a given id
* @param id
*/
static void ClearEvalSumKeys(const string& id);
/**
* ClearEvalSumKeys - flush EvalSumKey cache for a given context
* @param cc
*/
static void ClearEvalSumKeys(const CryptoContext<Element> cc);
/**
* InsertEvalSumKey - add the given map of keys to the map, replacing the existing map if there
* @param mapToInsert
*/
static void InsertEvalSumKey(const shared_ptr<std::map<usint,LPEvalKey<Element>>> mapToInsert);
static bool SerializeEvalAutomorphismKey(Serialized* serObj) __attribute__ ((deprecated("serialization changed, see wiki for details")));
static bool SerializeEvalAutomorphismKey(Serialized* serObj, const string& id) __attribute__ ((deprecated("serialization changed, see wiki for details")));
static bool SerializeEvalAutomorphismKey(Serialized* serObj, const CryptoContext<Element> cc) __attribute__ ((deprecated("serialization changed, see wiki for details")));
static bool DeserializeEvalAutomorphismKey(const Serialized& serObj) __attribute__ ((deprecated("serialization changed, see wiki for details")));
/**
* SerializeEvalAutomorphismKey for a single EvalAuto key or all of the EvalAuto keys
*
* @param ser - stream to serialize to
* @param sertype - type of serialization
* @param id - key to serialize; empty string means all keys
* @return true on success
*/
template<typename ST>
static bool SerializeEvalAutomorphismKey(std::ostream& ser, const ST& sertype, string id = "") {
decltype(evalAutomorphismKeyMap)* smap;
decltype(evalAutomorphismKeyMap) omap;
if( id.length() == 0 )
smap = &evalAutomorphismKeyMap;
else {
auto k = evalAutomorphismKeyMap.find(id);
if( k == evalAutomorphismKeyMap.end() )
return false; // no such id
smap = &omap;
omap[ k->first ] = k->second;
}
Serial::Serialize(*smap, ser, sertype);
return true;
}
/**
* SerializeEvalAutomorphismKey for all of the EvalAuto keys for a context
*
* @param ser - stream to serialize to
* @param sertype - type of serialization
* @param cc - context
* @return true on success
*/
template<typename ST>
static bool SerializeEvalAutomorphismKey(std::ostream& ser, const ST& sertype, const CryptoContext<Element> cc) {
decltype(evalAutomorphismKeyMap) omap;
for( const auto& k : evalAutomorphismKeyMap ) {
if( k.second->begin()->second->GetCryptoContext() == cc ) {
omap[k.first] = k.second;
}
}
if( omap.size() == 0 )
return false;
Serial::Serialize(omap, ser, sertype);
return true;
}
/**
* DeserializeEvalAutomorphismKey deserialize all keys in the serialization
* deserialized keys silently replace any existing matching keys
* deserialization will create CryptoContextImpl if necessary
*
* @param ser - stream to serialize from
* @param sertype - type of serialization
* @return true on success
*/
template<typename ST>
static bool DeserializeEvalAutomorphismKey(std::istream& ser, const ST& sertype) {
decltype(evalAutomorphismKeyMap) evalSumKeys;
Serial::Deserialize(evalSumKeys, ser, sertype);
// The deserialize call created any contexts that needed to be created.... so all we need to do
// is put the keys into the maps for their context
for( auto k : evalSumKeys ) {
evalAutomorphismKeyMap[ k.first ] = k.second;
}
return true;
}
/**
* ClearEvalAutomorphismKeys - flush EvalAutomorphismKey cache
*/
static void ClearEvalAutomorphismKeys();
/**
* ClearEvalAutomorphismKeys - flush EvalAutomorphismKey cache for a given id
* @param id
*/
static void ClearEvalAutomorphismKeys(const string& id);
/**
* ClearEvalAutomorphismKeys - flush EvalAutomorphismKey cache for a given context
* @param cc
*/
static void ClearEvalAutomorphismKeys(const CryptoContext<Element> cc);
/**
* InsertEvalAutomorphismKey - add the given map of keys to the map, replacing the existing map if there
* @param mapToInsert
*/
static void InsertEvalAutomorphismKey(const shared_ptr<std::map<usint,LPEvalKey<Element>>> mapToInsert);
// TURN FEATURES ON
	/**
	 * Enable a particular feature for use with this CryptoContextImpl
	 * (delegates directly to the underlying scheme).
	 * @param feature - the feature that should be enabled
	 */
	void Enable(PKESchemeFeature feature) { scheme->Enable(feature); }
	/**
	 * Enable several features at once.
	 * @param featureMask - bitwise or of several PKESchemeFeatures
	 */
	void Enable(usint featureMask) { scheme->Enable(featureMask); }
// GETTERS
	/**
	 * Getter for the scheme (encryption algorithm) used by this context.
	 * @return scheme
	 */
	const shared_ptr<LPPublicKeyEncryptionScheme<Element>> GetEncryptionAlgorithm() const { return scheme; }
	/**
	 * Getter for the crypto parameters of this context.
	 * @return params
	 */
	const shared_ptr<LPCryptoParameters<Element>> GetCryptoParameters() const { return params; }
	// Level at which keys are generated (used by leveled schemes).
	const size_t GetKeyGenLevel() const { return m_keyGenLevel; }
	void SetKeyGenLevel(size_t level) { m_keyGenLevel = level; }
	/**
	 * Getter for element params (delegates to the crypto parameters).
	 * @return
	 */
	const shared_ptr<typename Element::Params> GetElementParams() const { return params->GetElementParams(); }
	/**
	 * Getter for encoding params (delegates to the crypto parameters).
	 * @return
	 */
	const EncodingParams GetEncodingParams() const { return params->GetEncodingParams(); }
	/**
	 * Get the cyclotomic order used for this context
	 *
	 * @return
	 */
	const usint GetCyclotomicOrder() const { return params->GetElementParams()->GetCyclotomicOrder(); }
	/**
	 * Get the ring dimension used for this context
	 *
	 * @return
	 */
	const usint GetRingDimension() const { return params->GetElementParams()->GetRingDimension(); }
	/**
	 * Get the ciphertext modulus used for this context
	 *
	 * @return
	 */
	const typename Element::Integer& GetModulus() const { return params->GetElementParams()->GetModulus(); }
	/**
	 * Get the root of unity used for this context
	 *
	 * @return
	 */
	const typename Element::Integer& GetRootOfUnity() const { return params->GetElementParams()->GetRootOfUnity(); }
/**
* KeyGen generates a key pair using this algorithm's KeyGen method
* @return a public/secret key pair
*/
LPKeyPair<Element> KeyGen() {
TimeVar t;
if( doTiming ) TIC(t);
auto r = GetEncryptionAlgorithm()->KeyGen(CryptoContextFactory<Element>::GetContextForPointer(this), false);
if( doTiming ) {
timeSamples->push_back( TimingInfo(OpKeyGen, TOC_US(t)) );
}
return r;
}
/**
* KeyGen generates a Multiparty key pair using this algorithm's KeyGen method from two keys
* @param pk first public key used to coordinate the creation of later public keys.
* @return a public/secret key pair
*/
LPKeyPair<Element> MultipartyKeyGen(
const LPPublicKey<Element> pk, bool makeSparse=false, bool pre=false) {
TimeVar t;
if( doTiming ) TIC(t);
auto r = GetEncryptionAlgorithm()->MultipartyKeyGen(CryptoContextFactory<Element>::GetContextForPointer(this), pk, makeSparse, pre);
if( doTiming ) {
timeSamples->push_back( TimingInfo(OpMultiPartyKeyGenKey, TOC_US(t)) );
}
return r;
}
/**
* KeyGen generates a Multiparty key pair using a vector of secret keys
* @param secretKeys a vector of the secret keys to be used for multiparty computation.
* @return a public/secret key pair
*/
LPKeyPair<Element> MultipartyKeyGen(
const vector<LPPrivateKey<Element>>& secretKeys) {
TimeVar t;
if( doTiming ) TIC(t);
auto r = GetEncryptionAlgorithm()->MultipartyKeyGen(CryptoContextFactory<Element>::GetContextForPointer(this), secretKeys, false);
if( doTiming ) {
timeSamples->push_back( TimingInfo(OpMultiPartyKeyGenKeyvec, TOC_US(t)) );
}
return r;
}
/**
* Lead Multiparty Decryption method for PALISADE multiparty operations.
* This should be performed by exactly one of the clients.
* All other clients should perform the MultipartyDecryptMain operation.
* @param privateKey the secret key of the lead decryption client
* @param ciphertext vector of encrypted ciphertext
* @return vector of partially decrypted ciphertexts
*/
vector<Ciphertext<Element>> MultipartyDecryptLead(
const LPPrivateKey<Element> privateKey,
const vector<Ciphertext<Element>>& ciphertext) const
{
if( privateKey == NULL || Mismatched(privateKey->GetCryptoContext()) )
throw std::logic_error("Information passed to MultipartyDecryptLead was not generated with this crypto context");
vector<Ciphertext<Element>> newCiphertext;
TimeVar t;
if( doTiming ) TIC(t);
for( size_t i = 0; i < ciphertext.size(); i++ ) {
if( ciphertext[i] == NULL || Mismatched(ciphertext[i]->GetCryptoContext()) )
throw std::logic_error("A ciphertext passed to MultipartyDecryptLead was not generated with this crypto context");
newCiphertext.push_back( GetEncryptionAlgorithm()->MultipartyDecryptLead(privateKey, ciphertext[i]) );
}
if( doTiming ) {
timeSamples->push_back( TimingInfo(OpMultiPartyDecryptLead, TOC_US(t)) );
}
return newCiphertext;
}
/**
* Multiparty decryption method for PALISADE multiparty operations.
* The lead multiparty decryption operation should be performed by exactly one of the clients.
* All other clients should perform this MultipartyDecryptMain operation.
* @param privateKey - for decryption
* @param ciphertext - vector of encrypted ciphertext
* @return vector of partially decrypted ciphertexts
*/
vector<Ciphertext<Element>> MultipartyDecryptMain(
const LPPrivateKey<Element> privateKey,
const vector<Ciphertext<Element>>& ciphertext) const
{
if( privateKey == NULL || Mismatched(privateKey->GetCryptoContext()) )
throw std::logic_error("Information passed to MultipartyDecryptMain was not generated with this crypto context");
vector<Ciphertext<Element>> newCiphertext;
TimeVar t;
if( doTiming ) TIC(t);
for( size_t i = 0; i < ciphertext.size(); i++ ) {
if( ciphertext[i] == NULL || Mismatched(ciphertext[i]->GetCryptoContext()) )
throw std::logic_error("A ciphertext passed to MultipartyDecryptMain was not generated with this crypto context");
newCiphertext.push_back( GetEncryptionAlgorithm()->MultipartyDecryptMain(privateKey, ciphertext[i]) );
}
if( doTiming ) {
timeSamples->push_back( TimingInfo(OpMultiPartyDecryptMain, TOC_US(t)) );
}
return newCiphertext;
}
/**
* Final multiparty decryption method to fuse the partially decrypted ciphertexts into a decrypted plaintext.
* The lead multiparty decryption operation should be performed by exactly one of the clients.
* All other clients should perform the MultipartyDecryptMain operation.
* @param partialCiphertextVec - vector of partially decrypted ciphertexts.
* @param plaintext - pointer to destination for the result of decryption
* @param doPadding - true if input plaintext was padded; causes unpadding on last piece of ciphertext
* @return size of plaintext
*/
	DecryptResult MultipartyDecryptFusion(
		const vector<Ciphertext<Element>>& partialCiphertextVec,
		Plaintext *plaintext) const
	{
		DecryptResult result;
		//Make sure we're processing ciphertexts.
		size_t last_ciphertext = partialCiphertextVec.size();
		if ( last_ciphertext < 1 )
			return result;  // empty input: default (invalid) result
		TimeVar t;
		if( doTiming ) TIC(t);
		// All partials must come from this context and share one encoding.
		for( size_t i = 0; i < last_ciphertext; i++ ) {
			if (partialCiphertextVec[i] == NULL || Mismatched(partialCiphertextVec[i]->GetCryptoContext()))
				throw std::logic_error("A ciphertext passed to MultipartyDecryptFusion was not generated with this crypto context");
			if (partialCiphertextVec[i]->GetEncodingType() != partialCiphertextVec[0]->GetEncodingType())
				throw std::logic_error("Ciphertexts passed to MultipartyDecryptFusion have mismatched encoding types");
		}
		// determine which type of plaintext that you need to decrypt into
		Plaintext decrypted = GetPlaintextForDecrypt(partialCiphertextVec[0]->GetEncodingType(), partialCiphertextVec[0]->GetElements()[0].GetParams(), this->GetEncodingParams());
		// CKKS with a multiprecision element decrypts into Poly; all other
		// cases use NativePoly.
		if ((partialCiphertextVec[0]->GetEncodingType() == CKKSPacked) && (typeid(Element) != typeid(NativePoly)))
			result = GetEncryptionAlgorithm()->MultipartyDecryptFusion(partialCiphertextVec, &decrypted->GetElement<Poly>());
		else
			result = GetEncryptionAlgorithm()->MultipartyDecryptFusion(partialCiphertextVec, &decrypted->GetElement<NativePoly>());
		if (result.isValid == false) return result;
		// CKKS decode needs depth/scaling-factor/rescaling info from the
		// first partial ciphertext; other encodings decode directly.
		if (partialCiphertextVec[0]->GetEncodingType() == CKKSPacked){
			shared_ptr<CKKSPackedEncoding> decryptedCKKS = std::dynamic_pointer_cast<CKKSPackedEncoding>(decrypted);
			const shared_ptr<LPCryptoParametersCKKS<DCRTPoly>> cryptoParamsCKKS =
					std::dynamic_pointer_cast<LPCryptoParametersCKKS<DCRTPoly>>(this->GetCryptoParameters());
			decryptedCKKS->Decode(partialCiphertextVec[0]->GetDepth(),
					partialCiphertextVec[0]->GetScalingFactor(),
					cryptoParamsCKKS->GetRescalingTechnique());
		}
		else
			decrypted->Decode();
		*plaintext = decrypted;
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpMultiPartyDecryptFusion, TOC_US(t)) );
		}
		return result;
	}
/**
 * Generates a key pair with special (sparse) structure and reduced entropy,
 * for use in special cases such as Ring Reduction.
 * @return the generated public/secret key pair
 */
LPKeyPair<Element> SparseKeyGen() {
    TimeVar timer;
    if (doTiming)
        TIC(timer);
    auto keyPair = GetEncryptionAlgorithm()->KeyGen(
        CryptoContextFactory<Element>::GetContextForPointer(this), true);
    if (doTiming)
        timeSamples->push_back(TimingInfo(OpSparseKeyGen, TOC_US(timer)));
    return keyPair;
}
/**
 * Produces an evaluation key usable for Proxy Re-Encryption.
 * @param newKey public key of the delegatee
 * @param oldKey private key of the delegator
 * @return the new re-encryption key
 * @throws std::logic_error if either key is null or from another context
 */
LPEvalKey<Element> ReKeyGen(
    const LPPublicKey<Element> newKey,
    const LPPrivateKey<Element> oldKey) const {
    const bool badKeys = (newKey == NULL) || (oldKey == NULL) ||
        Mismatched(newKey->GetCryptoContext()) ||
        Mismatched(oldKey->GetCryptoContext());
    if (badKeys)
        throw std::logic_error("Keys passed to ReKeyGen were not generated with this crypto context");
    TimeVar timer;
    if (doTiming)
        TIC(timer);
    auto evalKey = GetEncryptionAlgorithm()->ReKeyGen(newKey, oldKey);
    if (doTiming)
        timeSamples->push_back(TimingInfo(OpReKeyGenPubPri, TOC_US(timer)));
    return evalKey;
}
/**
 * ReKeyGen produces an Eval Key that PALISADE can use for Proxy Re Encryption.
 * NOTE this functionality has been completely removed from PALISADE;
 * calling this overload is a compile-time deprecation warning.
 * @param newKey (private)
 * @param oldKey (private)
 * @return new evaluation key
 */
LPEvalKey<Element> ReKeyGen(
    const LPPrivateKey<Element> newKey,
    const LPPrivateKey<Element> oldKey) const
    __attribute__ ((deprecated("functionality removed from PALISADE")));
/**
 * EvalMultKeyGen creates a key that can be used with the PALISADE EvalMult operator.
 * The key is stored in the context's static key store (see GetAllEvalMultKeys).
 * @param key the private key used to derive the relinearization key
 */
void EvalMultKeyGen(const LPPrivateKey<Element> key);
/**
 * EvalMultsKeyGen creates a vector of evalmult keys that can be used with the
 * PALISADE EvalMult operator.
 * 1st key (for s^2) is used for multiplication of ciphertexts of depth 1
 * 2nd key (for s^3) is used for multiplication of ciphertexts of depth 2, etc.
 *
 * @param key the private key used to derive the relinearization keys
 */
void EvalMultKeysGen(const LPPrivateKey<Element> key);
/**
 * GetEvalMultKeyVector fetches the eval mult keys for a given KeyID.
 * @param keyID identifier of the secret key the eval keys were created for
 * @return key vector from ID
 */
static const vector<LPEvalKey<Element>>& GetEvalMultKeyVector(const string& keyID);
/**
 * GetAllEvalMultKeys
 * @return map (keyed by keyID) of all the eval mult key vectors
 */
static const std::map<string,std::vector<LPEvalKey<Element>>>& GetAllEvalMultKeys();
/**
 * Creates a key that can be used with the PALISADE KeySwitch operation,
 * switching ciphertexts encrypted under key1 to be decryptable by key2.
 * @param key1 original secret key
 * @param key2 target secret key
 * @return the new key-switching evaluation key
 * @throws std::logic_error if either key is null or from another context
 */
LPEvalKey<Element> KeySwitchGen(
    const LPPrivateKey<Element> key1, const LPPrivateKey<Element> key2) const {
    const bool badKeys = (key1 == NULL) || (key2 == NULL) ||
        Mismatched(key1->GetCryptoContext()) ||
        Mismatched(key2->GetCryptoContext());
    if (badKeys)
        throw std::logic_error("Keys passed to KeySwitchGen were not generated with this crypto context");
    TimeVar timer;
    if (doTiming)
        TIC(timer);
    auto switchKey = GetEncryptionAlgorithm()->KeySwitchGen(key1, key2);
    if (doTiming)
        timeSamples->push_back(TimingInfo(OpKeySwitchGen, TOC_US(timer)));
    return switchKey;
}
/**
 * Encrypts a plaintext with the given public key.
 * Encoding type, scaling factor, depth and level are carried over from the
 * plaintext onto the resulting ciphertext.
 * @param publicKey encryption key
 * @param plaintext data to encrypt
 * @return ciphertext (or null on failure)
 */
Ciphertext<Element> Encrypt(
    const LPPublicKey<Element> publicKey,
    Plaintext plaintext)
{
    if (publicKey == NULL)
        throw std::logic_error("null key passed to Encrypt");
    if (plaintext == NULL)
        throw std::logic_error("null plaintext passed to Encrypt");
    if (Mismatched(publicKey->GetCryptoContext()))
        throw std::logic_error("key passed to Encrypt was not generated with this crypto context");
    TimeVar timer;
    if (doTiming)
        TIC(timer);
    Ciphertext<Element> result =
        GetEncryptionAlgorithm()->Encrypt(publicKey, plaintext->GetElement<Element>());
    if (result) {
        // propagate the plaintext's encoding metadata onto the ciphertext
        result->SetEncodingType(plaintext->GetEncodingType());
        result->SetScalingFactor(plaintext->GetScalingFactor());
        result->SetDepth(plaintext->GetDepth());
        result->SetLevel(plaintext->GetLevel());
    }
    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEncryptPub, TOC_US(timer)));
    return result;
}
/**
 * Encrypts a plaintext with the given private key (symmetric-style encryption).
 * Encoding type, scaling factor, depth and level are carried over from the
 * plaintext onto the resulting ciphertext.
 * @param privateKey encryption key
 * @param plaintext data to encrypt
 * @return ciphertext (or null on failure)
 */
Ciphertext<Element> Encrypt(
    const LPPrivateKey<Element> privateKey,
    Plaintext plaintext) const
{
    if (privateKey == NULL || Mismatched(privateKey->GetCryptoContext()))
        throw std::logic_error("key passed to Encrypt was not generated with this crypto context");
    if (plaintext == NULL)
        throw std::logic_error("null plaintext passed to Encrypt");
    TimeVar timer;
    if (doTiming)
        TIC(timer);
    Ciphertext<Element> result =
        GetEncryptionAlgorithm()->Encrypt(privateKey, plaintext->GetElement<Element>());
    if (result) {
        // propagate the plaintext's encoding metadata onto the ciphertext
        result->SetEncodingType(plaintext->GetEncodingType());
        result->SetScalingFactor(plaintext->GetScalingFactor());
        result->SetDepth(plaintext->GetDepth());
        result->SetLevel(plaintext->GetLevel());
    }
    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEncryptPriv, TOC_US(timer)));
    return result;
}
/**
* Encrypt a matrix of Plaintext
* @param publicKey - for encryption
 * @param plaintext - matrix of plaintexts to encode and encrypt
 * @return a matrix of RationalCiphertexts created by encrypting the plaintext,
 *         or an empty shared_ptr if encoding of any entry fails
*/
/**
 * Encrypts every entry of a plaintext matrix under the given public key.
 * @param publicKey encryption key
 * @param plaintext matrix of plaintexts; each entry is encoded in place
 * @return matrix of RationalCiphertexts (numerators set), or an empty
 *         shared_ptr if encoding of any entry fails
 */
shared_ptr<Matrix<RationalCiphertext<Element>>> EncryptMatrix(
    const LPPublicKey<Element> publicKey,
    Matrix<Plaintext> &plaintext)
{
    if (publicKey == NULL || Mismatched(publicKey->GetCryptoContext()))
        throw std::logic_error("key passed to EncryptMatrix was not generated with this crypto context");
    auto zeroAlloc = [=]() { return RationalCiphertext<Element>(publicKey->GetCryptoContext(), true); };
    shared_ptr<Matrix<RationalCiphertext<Element>>> results(
        new Matrix<RationalCiphertext<Element>>(zeroAlloc, plaintext.GetRows(), plaintext.GetCols()));
    TimeVar timer;
    if (doTiming)
        TIC(timer);
    for (size_t r = 0; r < plaintext.GetRows(); r++) {
        for (size_t c = 0; c < plaintext.GetCols(); c++) {
            if (plaintext(r, c)->Encode() == false)
                return nullptr;  // encode failure: signal with an empty shared_ptr
            Ciphertext<Element> ct =
                GetEncryptionAlgorithm()->Encrypt(publicKey, plaintext(r, c)->GetElement<Element>());
            if (ct)
                ct->SetEncodingType(plaintext(r, c)->GetEncodingType());
            (*results)(r, c).SetNumerator(ct);
        }
    }
    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEncryptMatrixPlain, TOC_US(timer)));
    return results;
}
/**
* Encrypt a matrix of Plaintext
* @param publicKey - for encryption
 * @param plaintext - matrix of plaintexts to encode and encrypt
 * @return a Matrix of Ciphertexts created by encrypting the plaintext
*/
/**
 * Encrypts every entry of a plaintext matrix under the given public key,
 * returning a plain Ciphertext matrix (no rational numerator/denominator).
 * @param publicKey encryption key
 * @param plaintext matrix of plaintexts; each entry is encoded in place
 * @return matrix of Ciphertexts
 * @throws std::logic_error on a bad key or on an encode failure
 */
Matrix<Ciphertext<Element>> EncryptMatrixCiphertext(
    const LPPublicKey<Element> publicKey,
    Matrix<Plaintext> &plaintext)
{
    if (publicKey == NULL || Mismatched(publicKey->GetCryptoContext()))
        throw std::logic_error("key passed to EncryptMatrix was not generated with this crypto context");
    auto zeroAlloc = [=]() { return Ciphertext<Element>(new CiphertextImpl<Element>(publicKey->GetCryptoContext())); };
    Matrix<Ciphertext<Element>> results(zeroAlloc, plaintext.GetRows(), plaintext.GetCols());
    TimeVar timer;
    if (doTiming)
        TIC(timer);
    for (size_t r = 0; r < plaintext.GetRows(); r++) {
        for (size_t c = 0; c < plaintext.GetCols(); c++) {
            if (plaintext(r, c)->Encode() == false)
                throw std::logic_error("Plaintext is not encoded");
            Ciphertext<Element> ct =
                GetEncryptionAlgorithm()->Encrypt(publicKey, plaintext(r, c)->GetElement<Element>());
            if (ct)
                ct->SetEncodingType(plaintext(r, c)->GetEncodingType());
            results(r, c) = ct;
        }
    }
    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEncryptMatrixPlain, TOC_US(timer)));
    return results;
}
/**
 * Perform an encryption by reading plaintext from a stream, serializing each piece of ciphertext,
 * and writing the serializations to an output stream.
 * DEPRECATED: the serialization format changed; see the project wiki for details.
 * @param publicKey - the encryption key in use
 * @param instream - where to read the input from
 * @param outstream - where to write the serialization to
 */
void EncryptStream(
    const LPPublicKey<Element> publicKey,
    std::istream& instream,
    std::ostream& outstream) const __attribute__ ((deprecated("serialization changed, see wiki for details")));
// PLAINTEXT FACTORY METHODS
// FIXME to be deprecated in 2.0
/**
 * Constructs a ScalarEncoding plaintext in this context.
 * @param value the scalar to encode
 * @return plaintext
 */
Plaintext MakeScalarPlaintext(int64_t value) const {
    return PlaintextFactory::MakePlaintext(
        Scalar, this->GetElementParams(), this->GetEncodingParams(), value);
}
/**
 * Constructs a StringEncoding plaintext in this context.
 * @param str the string to encode
 * @return plaintext
 */
Plaintext MakeStringPlaintext(const string& str) const {
    return PlaintextFactory::MakePlaintext(
        String, this->GetElementParams(), this->GetEncodingParams(), str);
}
/**
 * Constructs an IntegerEncoding plaintext in this context.
 * @param value the integer to encode
 * @return plaintext
 */
Plaintext MakeIntegerPlaintext(int64_t value) const {
    return PlaintextFactory::MakePlaintext(
        Integer, this->GetElementParams(), this->GetEncodingParams(), value);
}
/**
 * Constructs a FractionalEncoding plaintext in this context.
 * @param value the value to encode
 * @param truncatedBits limit on the fractional part (default 0)
 * @return plaintext
 */
Plaintext MakeFractionalPlaintext(int64_t value, size_t truncatedBits = 0) const {
    return PlaintextFactory::MakePlaintext(
        Fractional, this->GetElementParams(), this->GetEncodingParams(), value, truncatedBits);
}
/**
 * Constructs a CoefPackedEncoding plaintext in this context.
 * @param value coefficients to encode
 * @return plaintext
 */
Plaintext MakeCoefPackedPlaintext(const vector<int64_t>& value) const {
    return PlaintextFactory::MakePlaintext(
        CoefPacked, this->GetElementParams(), this->GetEncodingParams(), value);
}
/**
 * Constructs a PackedEncoding plaintext in this context.
 * @param value slot values to encode
 * @return plaintext
 */
Plaintext MakePackedPlaintext(const vector<int64_t>& value) const {
    return PlaintextFactory::MakePlaintext(
        Packed, this->GetElementParams(), this->GetEncodingParams(), value);
}
/**
 * Static helper that takes a crypto context and forwards to the Plaintext Factory.
 * @param encoding which encoding to produce
 * @param cc the crypto context supplying element/encoding params
 * @param value the value to encode
 * @return plaintext
 */
template<typename Value1>
static Plaintext MakePlaintext(PlaintextEncodings encoding, CryptoContext<Element> cc, const Value1& value) {
    auto ep = cc->GetElementParams();
    auto encp = cc->GetEncodingParams();
    return PlaintextFactory::MakePlaintext(encoding, ep, encp, value);
}
// Two-argument variant of the static MakePlaintext helper; forwards both
// values to the Plaintext Factory.
template<typename Value1, typename Value2>
static Plaintext MakePlaintext(PlaintextEncodings encoding, CryptoContext<Element> cc, const Value1& value, const Value2& value2) {
    auto ep = cc->GetElementParams();
    auto encp = cc->GetEncodingParams();
    return PlaintextFactory::MakePlaintext(encoding, ep, encp, value, value2);
}
/**
 * MakeCKKSPackedPlaintext constructs a CKKSPackedEncoding in this context.
 * @param value complex slot values to encode
 * @param depth multiplicative depth the plaintext is encoded at (default 1)
 * @param level number of RNS towers already dropped (default 0)
 * @param params optional element parameters; when null they are derived from
 *        the context's parameters (trimmed by `level` towers)
 * @return the encoded plaintext
 */
Plaintext MakeCKKSPackedPlaintext(const std::vector<std::complex<double>> &value,
        size_t depth=1, uint32_t level=0,
        const shared_ptr<typename Element::Params> params=nullptr) const {
    Plaintext p;
    const shared_ptr<LPCryptoParametersCKKS<DCRTPoly>> cryptoParamsCKKS =
            std::dynamic_pointer_cast<LPCryptoParametersCKKS<DCRTPoly>>(this->GetCryptoParameters());
    double ptxtMod = cryptoParamsCKKS->GetEncodingParams()->GetPlaintextModulus();
    double scFact = 1.0;
    // Pick the scaling factor: EXACTRESCALE keeps a per-level factor; otherwise
    // the factor is 2^ptxtMod (in CKKS the "plaintext modulus" field stores the
    // log2 of the scale -- presumably; TODO confirm against crypto params docs).
    if (cryptoParamsCKKS->GetRescalingTechnique() == EXACTRESCALE) {
        scFact = cryptoParamsCKKS->GetScalingFactorOfLevel(level);
    } else {
        scFact = pow(2, ptxtMod);
    }
    if (params == nullptr) {
        shared_ptr<ILDCRTParams<DCRTPoly::Integer>> elemParamsPtr;
        if (level != 0) {
            // Copy the context's element params and drop `level` towers so the
            // plaintext matches ciphertexts at that level.
            ILDCRTParams<DCRTPoly::Integer> elemParams = *(cryptoParamsCKKS->GetElementParams());
            for (uint32_t i=0; i<level; i++) {
                elemParams.PopLastParam();
            }
            elemParamsPtr = std::make_shared<ILDCRTParams<DCRTPoly::Integer>>(elemParams);
        } else {
            elemParamsPtr = cryptoParamsCKKS->GetElementParams();
        }
        p = Plaintext( new CKKSPackedEncoding( elemParamsPtr, this->GetEncodingParams(), value, depth, level, scFact) );
    } else
        p = Plaintext( new CKKSPackedEncoding( params, this->GetEncodingParams(), value, depth, level, scFact) );
    p->Encode();
    return p;
}
/**
 * GetPlaintextForDecrypt returns a new Plaintext to be used in decryption.
 * For most encodings the plaintext is backed by a NativePoly built from a
 * small single-modulus parameter set; CKKS falls back to the full (Poly)
 * element parameters when the ciphertext modulus exceeds MAX_MODULUS_SIZE.
 *
 * @param pte Type of plaintext we want to return
 * @param evp Element parameters
 * @param ep Encoding parameters
 * @return plaintext
 */
static Plaintext
GetPlaintextForDecrypt(PlaintextEncodings pte, shared_ptr<typename Element::Params> evp, EncodingParams ep) {
    // Small native parameter set: same cyclotomic order, plaintext modulus, depth 1.
    shared_ptr<typename NativePoly::Params> vp(
            new typename NativePoly::Params(evp->GetCyclotomicOrder(), ep->GetPlaintextModulus(), 1) );
    Plaintext tempPlaintext;
    if (pte == CKKSPacked)
    {
        // CKKS: use the native params only if the modulus fits a native integer.
        if (evp->GetModulus().GetMSB() < MAX_MODULUS_SIZE + 1)
            tempPlaintext = PlaintextFactory::MakePlaintext(pte, vp, ep);
        else
            tempPlaintext = PlaintextFactory::MakePlaintext(pte, evp, ep);
    }
    else
        tempPlaintext = PlaintextFactory::MakePlaintext(pte, vp, ep);
    return tempPlaintext;
}
public:
/**
 * Decrypt a single ciphertext into the appropriate plaintext.
 *
 * @param privateKey - decryption key
 * @param ciphertext - ciphertext to decrypt
 * @param plaintext - resulting plaintext object pointer is here
 * @return the DecryptResult reported by the scheme's Decrypt
 */
DecryptResult Decrypt(
    const LPPrivateKey<Element> privateKey,
    ConstCiphertext<Element> ciphertext,
    Plaintext* plaintext)
{
    if( privateKey == NULL || Mismatched(privateKey->GetCryptoContext()) )
        throw std::logic_error("Information passed to Decrypt was not generated with this crypto context");
    TimeVar t;
    if( doTiming ) TIC(t);
    // determine which type of plaintext that you need to decrypt into
    //Plaintext decrypted = GetPlaintextForDecrypt(ciphertext->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams());
    Plaintext decrypted = GetPlaintextForDecrypt(ciphertext->GetEncodingType(), ciphertext->GetElements()[0].GetParams(), this->GetEncodingParams());
    DecryptResult result;
    // CKKS with a multiprecision element type may decrypt into either a
    // NativePoly (modulus fits a machine word) or a full Poly; all other
    // encodings always decrypt into a NativePoly.
    if ((ciphertext->GetEncodingType() == CKKSPacked) && (typeid(Element) != typeid(NativePoly))) {
        if (typeid(Element) == typeid(DCRTPoly))
        {
            if (ciphertext->GetElements()[0].GetModulus().GetMSB() < MAX_MODULUS_SIZE + 1) // only one tower in DCRTPoly
                result = GetEncryptionAlgorithm()->Decrypt(privateKey, ciphertext, &decrypted->GetElement<NativePoly>());
            else
                result = GetEncryptionAlgorithm()->Decrypt(privateKey, ciphertext, &decrypted->GetElement<Poly>());
        }
        else
            result = GetEncryptionAlgorithm()->Decrypt(privateKey, ciphertext, &decrypted->GetElement<Poly>());
    }
    else
        result = GetEncryptionAlgorithm()->Decrypt(privateKey, ciphertext, &decrypted->GetElement<NativePoly>());
    if (result.isValid == false) return result;
    // Decode: CKKS needs depth/level/scaling-factor metadata from the ciphertext.
    if (ciphertext->GetEncodingType() == CKKSPacked){
        shared_ptr<CKKSPackedEncoding> decryptedCKKS = std::dynamic_pointer_cast<CKKSPackedEncoding>(decrypted);
        decryptedCKKS->SetDepth(ciphertext->GetDepth());
        decryptedCKKS->SetLevel(ciphertext->GetLevel());
        decryptedCKKS->SetScalingFactor(ciphertext->GetScalingFactor());
        const shared_ptr<LPCryptoParametersCKKS<DCRTPoly>> cryptoParamsCKKS =
                std::dynamic_pointer_cast<LPCryptoParametersCKKS<DCRTPoly>>(this->GetCryptoParameters());
        decryptedCKKS->Decode(ciphertext->GetDepth(), ciphertext->GetScalingFactor(), cryptoParamsCKKS->GetRescalingTechnique());
    }
    else
        decrypted->Decode();
    if( doTiming ) {
        timeSamples->push_back( TimingInfo(OpDecrypt, TOC_US(t)) );
    }
    *plaintext = decrypted;
    return result;
}
/**
* Decrypt method for a matrix of ciphertexts
* @param privateKey - for decryption
* @param ciphertext - matrix of encrypted ciphertexts
 * @param numerator/denominator - pointers to the destination matrices of plaintexts
* @return size of plaintext
*/
// Decrypts a matrix of RationalCiphertexts into numerator and denominator
// plaintext matrices.
// @param privateKey  decryption key (must belong to this context)
// @param ciphertext  matrix of encrypted rational ciphertexts
// @param numerator   out: matrix of decrypted numerators (allocated here)
// @param denominator out: matrix of decrypted denominators (allocated here)
// @return size of the last decrypted plaintext, or an invalid result on failure
DecryptResult DecryptMatrix(
    const LPPrivateKey<Element> privateKey,
    const shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext,
    shared_ptr<Matrix<Plaintext>> *numerator,
    shared_ptr<Matrix<Plaintext>> *denominator) const
{
    // edge case: empty matrix, nothing to do
    if ((ciphertext->GetCols()== 0) && (ciphertext->GetRows() == 0))
        return DecryptResult();
    if (privateKey == NULL || Mismatched(privateKey->GetCryptoContext()))
        throw std::runtime_error("Information passed to DecryptMatrix was not generated with this crypto context");
    const Ciphertext<Element> ctN = (*ciphertext)(0, 0).GetNumerator();
    // need to build matrices for the result; note the allocator hands every
    // entry a shared_ptr to the SAME placeholder plaintext, so each entry must
    // be overwritten below before it is decoded
    Plaintext ptx = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams());
    auto zeroPackingAlloc = [=]() { return Plaintext(ptx); };
    *numerator = shared_ptr<Matrix<Plaintext>>( new Matrix<Plaintext>(zeroPackingAlloc, ciphertext->GetRows(), ciphertext->GetCols()) );
    *denominator = shared_ptr<Matrix<Plaintext>>( new Matrix<Plaintext>(zeroPackingAlloc, ciphertext->GetRows(), ciphertext->GetCols()) );
    TimeVar t;
    if( doTiming ) TIC(t);
    for (size_t row = 0; row < ciphertext->GetRows(); row++)
    {
        for (size_t col = 0; col < ciphertext->GetCols(); col++)
        {
            if (Mismatched((*ciphertext)(row, col).GetCryptoContext()))
                throw std::runtime_error("A ciphertext passed to DecryptMatrix was not generated with this crypto context");
            const Ciphertext<Element> ctN = (*ciphertext)(row, col).GetNumerator();
            // determine which type of plaintext that you need to decrypt into
            Plaintext decryptedNumerator = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams());
            DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt(privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>());
            if (resultN.isValid == false) return resultN;
            (**numerator)(row,col) = decryptedNumerator;
            (**numerator)(row,col)->Decode();
            Plaintext decryptedDenominator = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams());
            if( (*ciphertext)(row,col).GetIntegerFlag() == true ) {
                // integer ciphertext: the denominator is the constant 1
                decryptedDenominator->GetElement<Poly>().SetValuesToZero();
                decryptedDenominator->GetElement<Poly>().at(0) = 1;
                // BUGFIX: this assignment was missing, so the matrix entry kept
                // aliasing the shared placeholder plaintext and the constructed
                // denominator was silently discarded before Decode() below.
                (**denominator)(row,col) = decryptedDenominator;
            }
            else {
                const Ciphertext<Element> ctD = (*ciphertext)(row, col).GetDenominator();
                DecryptResult resultD = GetEncryptionAlgorithm()->Decrypt(privateKey, ctD, &decryptedDenominator->GetElement<NativePoly>());
                if (resultD.isValid == false) return resultD;
                (**denominator)(row,col) = decryptedDenominator;
            }
            (**denominator)(row, col)->Decode();
        }
    }
    if( doTiming ) {
        timeSamples->push_back( TimingInfo(OpDecryptMatrixPlain, TOC_US(t)) );
    }
    return DecryptResult((**numerator)((*numerator)->GetRows()-1,(*numerator)->GetCols()-1)->GetLength());
}
/**
* Decrypt method for a matrix of ciphertexts
* @param privateKey - for decryption
* @param ciphertext - matrix of encrypted ciphertexts
 * @param numerator - pointer to the destination matrix of plaintexts
* @return size of plaintext
*/
// Decrypts a matrix of plain Ciphertexts into a matrix of Plaintexts.
// NOTE(review): unlike DecryptMatrix, the output matrix is NOT allocated here
// (the allocation code is commented out below) -- the caller apparently must
// pass a pre-sized `numerator` matrix; confirm against call sites.
DecryptResult DecryptMatrixCiphertext(
    const LPPrivateKey<Element> privateKey,
    const Matrix<Ciphertext<Element>> ciphertext,
    Matrix<Plaintext> *numerator) const
{
    // edge case: empty matrix, nothing to do
    if ((ciphertext.GetCols()== 0) && (ciphertext.GetRows() == 0))
        return DecryptResult();
    if (privateKey == NULL || Mismatched(privateKey->GetCryptoContext()))
        throw std::runtime_error("Information passed to DecryptMatrix was not generated with this crypto context");
    const Ciphertext<Element> ctN = (ciphertext)(0, 0);
    // need to build matrices for the result
    // Plaintext ptx = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams());
    // auto zeroPackingAlloc = [=]() { return Plaintext(ptx); };
    // numerator = new Matrix<Plaintext>(zeroPackingAlloc, ciphertext.GetRows(), ciphertext.GetCols());
    TimeVar t;
    if( doTiming ) TIC(t);
    for (size_t row = 0; row < ciphertext.GetRows(); row++)
    {
        for (size_t col = 0; col < ciphertext.GetCols(); col++)
        {
            if (Mismatched( (ciphertext(row, col))->GetCryptoContext() ))
                throw std::runtime_error("A ciphertext passed to DecryptMatrix was not generated with this crypto context");
            const Ciphertext<Element> ctN = (ciphertext)(row, col);
            // determine which type of plaintext that you need to decrypt into
            Plaintext decryptedNumerator = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams());
            DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt(privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>());
            if (resultN.isValid == false) return resultN;
            (*numerator)(row,col) = decryptedNumerator;
            (*numerator)(row,col)->Decode();
        }
    }
    if( doTiming ) {
        timeSamples->push_back( TimingInfo(OpDecryptMatrixPlain, TOC_US(t)) );
    }
    return DecryptResult((*numerator)( numerator->GetRows()-1, numerator->GetCols()-1)->GetLength());
}
/**
* Decrypt method for numerators in a matrix of ciphertexts (packed encoding)
* @param privateKey - for decryption
* @param ciphertext - matrix of encrypted ciphertexts
 * @param numerator - pointer to the destination matrix of plaintexts
* @return size of plaintext
*/
// Decrypts only the numerators of a matrix of RationalCiphertexts.
// The (0,0) entry is decrypted first, serially, to force any lazy
// precomputations; the remaining entries are decrypted in an OpenMP
// parallel loop over columns.
DecryptResult DecryptMatrixNumerator(
    const LPPrivateKey<Element> privateKey,
    const shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext,
    shared_ptr<Matrix<Plaintext>> *numerator) const
{
    // edge case: empty matrix, nothing to do
    if ((ciphertext->GetCols() == 0) && (ciphertext->GetRows() == 0))
        return DecryptResult();
    if (privateKey == NULL || Mismatched(privateKey->GetCryptoContext()))
        throw std::runtime_error("Information passed to DecryptMatrix was not generated with this crypto context");
    TimeVar t;
    if (doTiming) TIC(t);
    //force all precomputations to take place in advance
    if( Mismatched((*ciphertext)(0, 0).GetCryptoContext()) )
        throw std::runtime_error("A ciphertext passed to DecryptMatrix was not generated with this crypto context");
    const Ciphertext<Element> ctN = (*ciphertext)(0, 0).GetNumerator();
    // need to build a numerator matrix for the result
    Plaintext ptx = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams());
    auto zeroPackingAlloc = [=]() { return Plaintext(ptx); };
    *numerator = shared_ptr<Matrix<Plaintext>>( new Matrix<Plaintext>(zeroPackingAlloc, ciphertext->GetRows(), ciphertext->GetCols()) );
    Plaintext decryptedNumerator = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams());
    DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt(privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>());
    if (resultN.isValid == false) return resultN;
    (**numerator)(0, 0) = decryptedNumerator;
    (**numerator)(0, 0)->Decode();
    for (size_t row = 0; row < ciphertext->GetRows(); row++)
    {
#pragma omp parallel for
        for (size_t col = 0; col < ciphertext->GetCols(); col++)
        {
            if (row + col > 0)  // skip (0,0), already decrypted above
            {
                if( Mismatched((*ciphertext)(row, col).GetCryptoContext()) )
                    throw std::runtime_error("A ciphertext passed to DecryptMatrix was not generated with this crypto context");
                const Ciphertext<Element> ctN = (*ciphertext)(row, col).GetNumerator();
                Plaintext decryptedNumerator = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams());
                // NOTE(review): the DecryptResult of these parallel decrypts is
                // discarded, so a failed entry is not reported -- confirm intended.
                GetEncryptionAlgorithm()->Decrypt(privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>());
                (**numerator)(row, col) = decryptedNumerator;
                (**numerator)(row, col)->Decode();
            }
        }
    }
    if (doTiming) {
        timeSamples->push_back(TimingInfo(OpDecryptMatrixPacked, TOC_US(t)));
    }
    return DecryptResult((**numerator)((*numerator)->GetRows() - 1, (*numerator)->GetCols() - 1)->GetLength());
}
/**
 * Reads instream for a sequence of serialized ciphertexts; deserializes,
 * decrypts, and writes the plaintext to outstream.
 * DEPRECATED: the serialization format changed; see the project wiki for details.
 * @param privateKey - reference to the decryption key
 * @param instream - input stream with sequence of serialized ciphertexts
 * @param outstream - output stream for plaintext
 * @return total bytes processed
 */
size_t DecryptStream(
    const LPPrivateKey<Element> privateKey,
    std::istream& instream,
    std::ostream& outstream) __attribute__ ((deprecated("serialization changed, see wiki for details")));
/**
 * Proxy Re-Encryption: transforms a ciphertext so it decrypts under the
 * delegatee's key, using an evaluation key from the PRE keygen method.
 * @param evalKey evaluation key from the PRE keygen method
 * @param ciphertext ciphertext to re-encrypt
 * @param publicKey public key of the recipient of the re-encrypted ciphertext
 * @return the re-encrypted ciphertext
 * @throws std::logic_error on a null/mismatched key or ciphertext
 */
Ciphertext<Element> ReEncrypt(
    LPEvalKey<Element> evalKey,
    ConstCiphertext<Element> ciphertext,
    const LPPublicKey<Element> publicKey = nullptr) const
{
    if (evalKey == NULL || Mismatched(evalKey->GetCryptoContext()))
        throw std::logic_error("Information passed to ReEncrypt was not generated with this crypto context");
    if (ciphertext == NULL || Mismatched(ciphertext->GetCryptoContext()))
        throw std::logic_error("The ciphertext passed to ReEncrypt was not generated with this crypto context");
    TimeVar timer;
    if (doTiming)
        TIC(timer);
    Ciphertext<Element> reEncrypted =
        GetEncryptionAlgorithm()->ReEncrypt(evalKey, ciphertext, publicKey);
    if (doTiming)
        timeSamples->push_back(TimingInfo(OpReEncrypt, TOC_US(timer)));
    return reEncrypted;
}
/**
 * Reads instream for serialized ciphertexts; deserializes, re-encrypts,
 * serializes, and writes to outstream.
 * DEPRECATED: the serialization format changed; see the project wiki for details.
 * @param evalKey - reference to the re-encryption key
 * @param instream - input stream with sequence of serialized ciphertext
 * @param outstream - output stream with sequence of serialized re-encrypted ciphertext
 * @param publicKey - optional public key of the recipient
 */
void ReEncryptStream(
    const LPEvalKey<Element> evalKey,
    std::istream& instream,
    std::ostream& outstream,
    const LPPublicKey<Element> publicKey = nullptr) __attribute__ ((deprecated("serialization changed, see wiki for details")));
/**
 * Homomorphic addition of two ciphertexts.
 * @param ct1 first addend
 * @param ct2 second addend
 * @return new ciphertext for ct1 + ct2
 */
Ciphertext<Element>
EvalAdd(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const
{
    TypeCheck(ct1, ct2);
    TimeVar timer;
    if (doTiming)
        TIC(timer);
    Ciphertext<Element> sum = GetEncryptionAlgorithm()->EvalAdd(ct1, ct2);
    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalAdd, TOC_US(timer)));
    return sum;
}
/**
 * Homomorphic addition of two ciphertexts, mutable version: the inputs may
 * be automatically rescaled or level-reduced in place.
 * @param ct1 first addend (may be modified)
 * @param ct2 second addend (may be modified)
 * @return new ciphertext for ct1 + ct2
 */
Ciphertext<Element>
EvalAddMutable(Ciphertext<Element> &ct1, Ciphertext<Element> &ct2) const
{
    TypeCheck(ct1, ct2);
    TimeVar timer;
    if (doTiming)
        TIC(timer);
    Ciphertext<Element> sum = GetEncryptionAlgorithm()->EvalAddMutable(ct1, ct2);
    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalAdd, TOC_US(timer)));
    return sum;
}
/**
 * Homomorphic addition of two matrices of rational ciphertexts.
 * @param ct1 first addend matrix
 * @param ct2 second addend matrix
 * @return new matrix for ct1 + ct2
 */
shared_ptr<Matrix<RationalCiphertext<Element>>>
EvalAddMatrix(const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1, const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const
{
    TypeCheck((*ct1)(0,0), (*ct2)(0,0)); // TODO only checking one; when Matrix is refactored, this should be revisited
    TimeVar timer;
    if (doTiming)
        TIC(timer);
    Matrix<RationalCiphertext<Element>> sum = *ct1 + *ct2;
    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalAddMatrix, TOC_US(timer)));
    return shared_ptr<Matrix<RationalCiphertext<Element>>>(
        new Matrix<RationalCiphertext<Element>>(sum));
}
/**
 * Homomorphic addition of two matrices of ciphertexts.
 * @param ct1 first addend matrix
 * @param ct2 second addend matrix
 * @return new matrix for ct1 + ct2
 */
Matrix<Ciphertext<Element>>
EvalAddMatrix(const Matrix<Ciphertext<Element>> &ct1, const Matrix<Ciphertext<Element>> &ct2) const
{
    TypeCheck(ct1(0,0), ct2(0,0)); // TODO only checking one; when Matrix is refactored, this should be revisited
    TimeVar timer;
    if (doTiming)
        TIC(timer);
    Matrix<Ciphertext<Element>> sum = ct1 + ct2;
    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalAddMatrix, TOC_US(timer)));
    return sum;
}
/**
 * Homomorphic subtraction of two ciphertexts.
 * @param ct1 minuend
 * @param ct2 subtrahend
 * @return new ciphertext for ct1 - ct2
 */
Ciphertext<Element>
EvalSub(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const
{
    TypeCheck(ct1, ct2);
    TimeVar timer;
    if (doTiming)
        TIC(timer);
    Ciphertext<Element> diff = GetEncryptionAlgorithm()->EvalSub(ct1, ct2);
    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalSub, TOC_US(timer)));
    return diff;
}
/**
 * Homomorphic subtraction of two ciphertexts, mutable version: the inputs
 * may be automatically rescaled or level-reduced in place.
 * @param ct1 minuend (may be modified)
 * @param ct2 subtrahend (may be modified)
 * @return new ciphertext for ct1 - ct2
 */
Ciphertext<Element>
EvalSubMutable(Ciphertext<Element> &ct1, Ciphertext<Element> &ct2) const
{
    TypeCheck(ct1, ct2);
    TimeVar timer;
    if (doTiming)
        TIC(timer);
    Ciphertext<Element> diff = GetEncryptionAlgorithm()->EvalSubMutable(ct1, ct2);
    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalSub, TOC_US(timer)));
    return diff;
}
/**
 * Homomorphic subtraction of two matrices of rational ciphertexts.
 * @param ct1 minuend matrix
 * @param ct2 subtrahend matrix
 * @return new matrix for ct1 - ct2
 */
shared_ptr<Matrix<RationalCiphertext<Element>>>
EvalSubMatrix(const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1, const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const
{
    TypeCheck((*ct1)(0,0), (*ct2)(0,0)); // TODO only checking one; when Matrix is refactored, this should be revisited
    TimeVar timer;
    if (doTiming)
        TIC(timer);
    Matrix<RationalCiphertext<Element>> diff = *ct1 - *ct2;
    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalSubMatrix, TOC_US(timer)));
    return shared_ptr<Matrix<RationalCiphertext<Element>>>(
        new Matrix<RationalCiphertext<Element>>(diff));
}
/**
 * EvalSubMatrix - PALISADE EvalSub method for a pair of matrices of ciphertexts
 * @param ct1 minuend matrix
 * @param ct2 subtrahend matrix
 * @return new matrix for ct1 - ct2
 */
Matrix<Ciphertext<Element>>
EvalSubMatrix(const Matrix<Ciphertext<Element>> &ct1, const Matrix<Ciphertext<Element>> &ct2) const
{
    TypeCheck(ct1(0,0), ct2(0,0)); // TODO only checking one; when Matrix is refactored, this should be revisited
    TimeVar t;
    if( doTiming ) TIC(t);
    Matrix<Ciphertext<Element>> rv = ct1 - ct2;
    if( doTiming ) {
        timeSamples->push_back( TimingInfo(OpEvalSubMatrix, TOC_US(t)) );
    }
    // Return rv directly (consistent with EvalAddMatrix): the previous extra
    // copy into a temporary was unnecessary and defeated NRVO.
    return rv;
}
/**
 * Homomorphic addition of a ciphertext and a plaintext.
 * The plaintext is switched to EVALUATION format before the operation.
 * @param ciphertext addend ciphertext
 * @param plaintext addend plaintext
 * @return new ciphertext for ciphertext + plaintext
 */
Ciphertext<Element>
EvalAdd(ConstCiphertext<Element> ciphertext, ConstPlaintext plaintext) const
{
    TypeCheck(ciphertext, plaintext);
    TimeVar timer;
    if (doTiming)
        TIC(timer);
    plaintext->SetFormat(EVALUATION);
    Ciphertext<Element> sum = GetEncryptionAlgorithm()->EvalAdd(ciphertext, plaintext);
    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalAddPlain, TOC_US(timer)));
    return sum;
}
/**
 * Homomorphic addition of a ciphertext and a plaintext, mutable version:
 * the input ciphertext may be automatically rescaled or level-reduced.
 * The plaintext is switched to EVALUATION format before the operation.
 * @param ciphertext addend ciphertext (may be modified)
 * @param plaintext addend plaintext
 * @return new ciphertext for ciphertext + plaintext
 */
Ciphertext<Element>
EvalAddMutable(Ciphertext<Element> &ciphertext, Plaintext plaintext) const
{
    TypeCheck((ConstCiphertext<Element>)ciphertext, (ConstPlaintext) plaintext);
    TimeVar timer;
    if (doTiming)
        TIC(timer);
    plaintext->SetFormat(EVALUATION);
    Ciphertext<Element> sum = GetEncryptionAlgorithm()->EvalAddMutable(ciphertext, plaintext);
    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalAddPlain, TOC_US(timer)));
    return sum;
}
/**
 * EvalAdd - PALISADE EvalAdd method for a ciphertext and constant
 * A negative constant is handled as EvalSub with its magnitude.
 * @param ciphertext input ciphertext
 * @param constant additive constant
 * @return new ciphertext for ciphertext + constant
 */
Ciphertext<Element>
EvalAdd(ConstCiphertext<Element> ciphertext, double constant) const
{
    // Single timer for both branches; the old code declared a second,
    // shadowing TimeVar inside the else branch for no reason.
    TimeVar t;
    if( doTiming ) TIC(t);

    Ciphertext<Element> rv;
    if ( constant >= 0 ) {
        rv = GetEncryptionAlgorithm()->EvalAdd(ciphertext, constant);
    } else {
        // adding a negative constant is subtraction of its magnitude
        rv = GetEncryptionAlgorithm()->EvalSub(ciphertext, -constant);
    }

    if( doTiming ) {
        timeSamples->push_back( TimingInfo(OpEvalAddConst, TOC_US(t)) );
    }
    return rv;
}
/**
 * EvalLinearWSum - computes a linear weighted sum of ciphertexts.
 * @param ciphertexts list of ciphertexts
 * @param constants list of weights, one per ciphertext
 * @return new ciphertext containing sum_i constants[i] * ciphertexts[i]
 */
Ciphertext<Element> EvalLinearWSum(
    vector<Ciphertext<Element>> ciphertexts,
    vector<double> constants) const
{
    TimeVar timer;
    if (doTiming)
        TIC(timer);

    auto weightedSum = GetEncryptionAlgorithm()->EvalLinearWSum(ciphertexts, constants);

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalLinearWSum, TOC_US(timer)));
    return weightedSum;
}
/**
 * EvalLinearWSumMutable - computes a linear weighted sum of ciphertexts.
 * Mutable version: the level/depth of the input ciphertexts may change
 * in the process.
 * @param ciphertexts list of ciphertexts
 * @param constants list of weights, one per ciphertext
 * @return new ciphertext containing the weighted sum
 */
Ciphertext<Element> EvalLinearWSumMutable(
    vector<Ciphertext<Element>> ciphertexts,
    vector<double> constants) const
{
    TimeVar timer;
    if (doTiming)
        TIC(timer);

    auto weightedSum = GetEncryptionAlgorithm()->EvalLinearWSumMutable(ciphertexts, constants);

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalLinearWSum, TOC_US(timer)));
    return weightedSum;
}
// Convenience overload with the argument order swapped (weights first);
// defers to the canonical (ciphertexts, constants) form.
inline Ciphertext<Element>
EvalLinearWSum(vector<double> constants,
               vector<Ciphertext<Element>> ciphertexts) const
{
    return this->EvalLinearWSum(ciphertexts, constants);
}
// Convenience overload with the argument order swapped (weights first);
// defers to the canonical mutable (ciphertexts, constants) form.
inline Ciphertext<Element>
EvalLinearWSumMutable(vector<double> constants,
                      vector<Ciphertext<Element>> ciphertexts) const
{
    return this->EvalLinearWSumMutable(ciphertexts, constants);
}
// Commutative convenience overload: plaintext + ciphertext.
inline Ciphertext<Element>
EvalAdd(ConstPlaintext plaintext, ConstCiphertext<Element> ciphertext) const
{
    return this->EvalAdd(ciphertext, plaintext);
}
// Commutative convenience overload (mutable): plaintext + ciphertext.
inline Ciphertext<Element>
EvalAddMutable(Plaintext plaintext, Ciphertext<Element> &ciphertext) const
{
    return this->EvalAddMutable(ciphertext, plaintext);
}
// Commutative convenience overload: constant + ciphertext.
inline Ciphertext<Element>
EvalAdd(double constant, ConstCiphertext<Element> ciphertext) const
{
    return this->EvalAdd(ciphertext, constant);
}
/**
 * EvalSub - homomorphic subtraction of a plaintext from a ciphertext.
 * @param ciphertext input ciphertext (minuend)
 * @param plaintext input plaintext (subtrahend)
 * @return new ciphertext for ciphertext - plaintext
 */
Ciphertext<Element>
EvalSub(ConstCiphertext<Element> ciphertext, ConstPlaintext plaintext) const
{
    TypeCheck(ciphertext, plaintext);

    TimeVar timer;
    if (doTiming)
        TIC(timer);

    auto difference = GetEncryptionAlgorithm()->EvalSub(ciphertext, plaintext);

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalSubPlain, TOC_US(timer)));
    return difference;
}
/**
 * EvalSubMutable - homomorphic subtraction of a plaintext from a ciphertext.
 * Mutable version: the input ciphertext may get automatically rescaled
 * or level-reduced in the process.
 * @param ciphertext input/output ciphertext (minuend)
 * @param plaintext input plaintext (subtrahend)
 * @return new ciphertext for ciphertext - plaintext
 */
Ciphertext<Element>
EvalSubMutable(Ciphertext<Element> &ciphertext, Plaintext plaintext) const
{
    TypeCheck((ConstCiphertext<Element>)ciphertext, (ConstPlaintext) plaintext);

    TimeVar timer;
    if (doTiming)
        TIC(timer);

    auto difference = GetEncryptionAlgorithm()->EvalSubMutable(ciphertext, plaintext);

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalSubPlain, TOC_US(timer)));
    return difference;
}
/**
 * EvalSub - PALISADE EvalSub method for a ciphertext and constant.
 * A negative constant is handled as EvalAdd with its magnitude.
 * @param ciphertext input ciphertext
 * @param constant value to subtract
 * @return new ciphertext for ciphertext - constant
 */
Ciphertext<Element>
EvalSub(ConstCiphertext<Element> ciphertext, double constant) const
{
    TimeVar timer;
    Ciphertext<Element> result;

    if (doTiming)
        TIC(timer);

    // subtracting a negative constant is an addition of its magnitude
    result = (constant >= 0)
        ? GetEncryptionAlgorithm()->EvalSub(ciphertext, constant)
        : GetEncryptionAlgorithm()->EvalAdd(ciphertext, -constant);

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalSubConst, TOC_US(timer)));
    return result;
}
// plaintext - ciphertext, computed as (-ciphertext) + plaintext.
inline Ciphertext<Element>
EvalSub(ConstPlaintext plaintext, ConstCiphertext<Element> ciphertext) const
{
    return this->EvalAdd(EvalNegate(ciphertext), plaintext);
}
// EvalSubMutable for plaintext - ciphertext: computes (-ciphertext) + plaintext
// via the mutable EvalAdd. The order of operations matters here, so the code
// is left exactly as written.
inline Ciphertext<Element>
EvalSubMutable(Plaintext plaintext, Ciphertext<Element> &ciphertext) const
{
Ciphertext<Element> negated = EvalNegate(ciphertext);
Ciphertext<Element> result = EvalAddMutable(negated, plaintext);
// EvalAddMutable may have rescaled/level-reduced `negated`; write that
// adjusted state back to the caller's ciphertext by negating it again
// (double negation restores the original sign).
ciphertext = EvalNegate(negated);
return result;
}
// constant - ciphertext, computed as (-ciphertext) + constant.
inline Ciphertext<Element>
EvalSub(double constant, ConstCiphertext<Element> ciphertext) const
{
    return this->EvalAdd(EvalNegate(ciphertext), constant);
}
/**
 * EvalMult - homomorphic multiplication of two ciphertexts, followed by
 * key switching (relinearization) with the first eval-mult key.
 * @param ct1 first input ciphertext
 * @param ct2 second input ciphertext
 * @return new ciphertext for ct1 * ct2
 */
Ciphertext<Element>
EvalMult(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const
{
    TypeCheck(ct1, ct2);

    // eval-mult keys are looked up by the key tag of the first operand
    auto evalKeys = GetEvalMultKeyVector(ct1->GetKeyTag());

    TimeVar timer;
    if (doTiming)
        TIC(timer);

    auto product = GetEncryptionAlgorithm()->EvalMult(ct1, ct2, evalKeys[0]);

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(timer)));
    return product;
}
/**
 * EvalMultMutable - homomorphic multiplication of two ciphertexts with key
 * switching. Mutable version: the input ciphertexts may get automatically
 * rescaled or level-reduced in the process.
 * @param ct1 first input/output ciphertext
 * @param ct2 second input/output ciphertext
 * @return new ciphertext for ct1 * ct2
 */
Ciphertext<Element>
EvalMultMutable(Ciphertext<Element> &ct1, Ciphertext<Element> &ct2) const
{
    TypeCheck(ct1, ct2);

    // eval-mult keys are looked up by the key tag of the first operand
    auto evalKeys = GetEvalMultKeyVector(ct1->GetKeyTag());

    TimeVar timer;
    if (doTiming)
        TIC(timer);

    auto product = GetEncryptionAlgorithm()->EvalMultMutable(ct1, ct2, evalKeys[0]);

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(timer)));
    return product;
}
/**
 * EvalMultNoRelin - homomorphic multiplication of two ciphertexts WITHOUT
 * the key switching (relinearization) step.
 * @param ct1 first input ciphertext
 * @param ct2 second input ciphertext
 * @return new (non-relinearized) ciphertext for ct1 * ct2
 */
Ciphertext<Element>
EvalMultNoRelin(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const
{
    TypeCheck(ct1, ct2);

    TimeVar timer;
    if (doTiming)
        TIC(timer);

    auto product = GetEncryptionAlgorithm()->EvalMult(ct1, ct2);

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(timer)));
    return product;
}
/**
 * EvalMultMany - PALISADE function for evaluating multiplication on a list of
 * ciphertexts followed by relinearization (at the end). It computes the
 * multiplication in a binary tree manner and reduces the number of elements
 * in the ciphertext to two after each multiplication.
 * Currently it assumes that the consecutive two input arguments have total
 * depth smaller than the supported depth. Otherwise, it throws an error.
 *
 * @param ct is the (non-empty) ciphertext list.
 * @return new ciphertext.
 */
Ciphertext<Element> EvalMultMany(const vector<Ciphertext<Element>>& ct) const {
    // guard: ct[0] below would be undefined behavior on an empty vector
    if( ct.size() == 0 )
        throw std::logic_error("Empty input ciphertext vector passed to EvalMultMany");

    const auto ek = GetEvalMultKeyVector(ct[0]->GetKeyTag());

    TimeVar t;
    if( doTiming ) TIC(t);
    auto rv = GetEncryptionAlgorithm()->EvalMultMany(ct, ek);
    if( doTiming ) {
        timeSamples->push_back( TimingInfo(OpEvalMultMany, TOC_US(t)) );
    }
    return rv;
}
/**
 * EvalAddMany - evaluates addition over a vector of ciphertexts,
 * computed in a binary tree manner.
 * @param ctList list of input ciphertexts
 * @return new ciphertext holding the sum
 */
Ciphertext<Element> EvalAddMany(const vector<Ciphertext<Element>>& ctList) const {
    TimeVar timer;
    if (doTiming)
        TIC(timer);

    auto total = GetEncryptionAlgorithm()->EvalAddMany(ctList);

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalAddMany, TOC_US(timer)));
    return total;
}
/**
 * EvalAddManyInPlace - evaluates addition over a vector of ciphertexts in a
 * binary tree manner. Unlike EvalAddMany, intermediate results are stored in
 * the input vector itself, avoiding extra temporary space.
 * @param ctList list of input ciphertexts; used as scratch space
 * @return new ciphertext holding the sum
 */
Ciphertext<Element> EvalAddManyInPlace(vector<Ciphertext<Element>>& ctList) const {
    TimeVar timer;
    if (doTiming)
        TIC(timer);

    auto total = GetEncryptionAlgorithm()->EvalAddManyInPlace(ctList);

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalAddManyInPlace, TOC_US(timer)));
    return total;
}
/**
 * EvalMultAndRelinearize - homomorphic multiplication followed by
 * relinearization. Currently assumes the input arguments have total depth
 * smaller than the supported depth; otherwise an error is thrown.
 * @param ct1 first input ciphertext
 * @param ct2 second input ciphertext
 * @return new ciphertext for ct1 * ct2
 */
Ciphertext<Element> EvalMultAndRelinearize(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const {
    // the full eval-mult key vector is needed for relinearization
    const auto evalKeys = GetEvalMultKeyVector(ct1->GetKeyTag());

    TimeVar timer;
    if (doTiming)
        TIC(timer);

    auto product = GetEncryptionAlgorithm()->EvalMultAndRelinearize(ct1, ct2, evalKeys);

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(timer)));
    return product;
}
/**
 * Relinearize - relinearizes a ciphertext using this context's eval-mult keys.
 * @param ct input ciphertext
 * @return relinearized ciphertext
 */
Ciphertext<Element> Relinearize(ConstCiphertext<Element> ct) const {
    const auto evalKeys = GetEvalMultKeyVector(ct->GetKeyTag());

    TimeVar timer;
    if (doTiming)
        TIC(timer);

    auto relinearized = GetEncryptionAlgorithm()->Relinearize(ct, evalKeys);

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalRelin, TOC_US(timer)));
    return relinearized;
}
/**
 * EvalMult - commutative convenience overload: plaintext * ciphertext.
 * @param pt2 plaintext factor
 * @param ct1 ciphertext factor
 * @return new ciphertext for ct1 * pt2
 */
inline Ciphertext<Element>
EvalMult(ConstPlaintext pt2, ConstCiphertext<Element> ct1) const
{
    return this->EvalMult(ct1, pt2);
}
/**
 * EvalMultMutable - commutative convenience overload: plaintext * ciphertext.
 * @param pt2 plaintext factor
 * @param ct1 ciphertext factor (may be rescaled/level-reduced)
 * @return new ciphertext for ct1 * pt2
 */
inline Ciphertext<Element>
EvalMultMutable(Plaintext pt2, Ciphertext<Element> &ct1) const
{
    return this->EvalMultMutable(ct1, pt2);
}
/**
 * EvalMult - commutative convenience overload: constant * ciphertext.
 * @param constant scalar factor
 * @param ct1 ciphertext factor
 * @return new ciphertext for ct1 * constant
 */
inline Ciphertext<Element>
EvalMult(double constant, ConstCiphertext<Element> ct1) const
{
    return this->EvalMult(ct1, constant);
}
// Commutative convenience overload (mutable): constant * ciphertext.
inline Ciphertext<Element>
EvalMultMutable(double constant, Ciphertext<Element> &ct1) const
{
    return this->EvalMultMutable(ct1, constant);
}
/**
 * EvalRightShift - works only for Fractional Encoding.
 * Implemented as a multiplication by a fractional plaintext encoding of the
 * shift; the nested EvalMult also records its own OpEvalMult sample.
 * @param ct1 input ciphertext (must be Fractional encoded)
 * @param divisor the shift amount
 * @return new ciphertext for ct1 shifted right by divisor
 */
Ciphertext<Element>
EvalRightShift(ConstCiphertext<Element> ct1, size_t divisor) const
{
    if( ct1 && ct1->GetEncodingType() != Fractional ) {
        stringstream ss;
        ss << "A " << Fractional << " encoded ciphertext is required for the EvalRightShift operation";
        PALISADE_THROW( type_error, ss.str() );
    }

    Plaintext plaintextShift = MakeFractionalPlaintext(0,divisor);
    TypeCheck(ct1, plaintextShift);

    // Use TIC/TOC_US like every other timed operation in this class, so all
    // TimingInfo samples are recorded in the same (microsecond) units; the
    // old code used currentDateTime() here, producing inconsistent samples.
    TimeVar t;
    if( doTiming ) TIC(t);
    auto rv = EvalMult(ct1, plaintextShift);
    if( doTiming ) {
        timeSamples->push_back( TimingInfo(OpEvalRightShift, TOC_US(t)) );
    }
    return rv;
}
/**
 * EvalMult - homomorphic multiplication of a ciphertext by a plaintext.
 * @param ct1 ciphertext factor
 * @param pt2 plaintext factor
 * @return new ciphertext for ct1 * pt2
 */
Ciphertext<Element>
EvalMult(ConstCiphertext<Element> ct1, ConstPlaintext pt2) const
{
    TypeCheck(ct1, pt2);

    TimeVar timer;
    if (doTiming)
        TIC(timer);

    auto product = GetEncryptionAlgorithm()->EvalMult(ct1, pt2);

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(timer)));
    return product;
}
/**
 * EvalMultMutable - homomorphic multiplication of a ciphertext by a
 * plaintext. Mutable version: the input ciphertext may get automatically
 * rescaled or level-reduced in the process.
 * @param ct1 input/output ciphertext factor
 * @param pt2 plaintext factor
 * @return new ciphertext for ct1 * pt2
 */
Ciphertext<Element>
EvalMultMutable(Ciphertext<Element> &ct1, Plaintext pt2) const
{
    TypeCheck((ConstCiphertext<Element>) ct1, (ConstPlaintext) pt2);

    TimeVar timer;
    if (doTiming)
        TIC(timer);

    auto product = GetEncryptionAlgorithm()->EvalMultMutable(ct1, pt2);

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(timer)));
    return product;
}
/**
* EvalMult - PALISADE EvalMult method for a ciphertext and constant
* @param ciphertext
* @param constant
* @return new ciphertext for ciphertext * constant
*/
Ciphertext<Element>
EvalMult(ConstCiphertext<Element> ciphertext, double constant) const
{
TimeVar t;
if( doTiming ) TIC(t);
auto rv = GetEncryptionAlgorithm()->EvalMult(ciphertext, constant);
if( doTiming ) {
timeSamples->push_back( TimingInfo(OpEvalMultConst, TOC_US(t)) );
}
return rv;
}
/**
* EvalMultMutable - PALISADE EvalMult method for a ciphertext and constant
* This is a mutable version - input ciphertexts may get automatically
* rescaled, or level-reduced.
*
* @param ciphertext
* @param constant
* @return new ciphertext for ciphertext * constant
*/
Ciphertext<Element>
EvalMultMutable(Ciphertext<Element> &ciphertext, double constant) const
{
TimeVar t;
if( doTiming ) TIC(t);
auto rv = GetEncryptionAlgorithm()->EvalMultMutable(ciphertext, constant);
if( doTiming ) {
timeSamples->push_back( TimingInfo(OpEvalMultConst, TOC_US(t)) );
}
return rv;
}
/**
 * EvalMultMatrix - matrix product of two matrices of rational ciphertexts.
 * @param ct1 left matrix
 * @param ct2 right matrix
 * @return new matrix holding ct1 * ct2
 */
shared_ptr<Matrix<RationalCiphertext<Element>>>
EvalMultMatrix(const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1, const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const
{
    TypeCheck((*ct1)(0,0), (*ct2)(0,0)); // TODO only checking one; when Matrix is refactored, this should be revisited

    TimeVar timer;
    if (doTiming)
        TIC(timer);

    Matrix<RationalCiphertext<Element>> product = *ct1 * *ct2;

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalMultMatrix, TOC_US(timer)));

    shared_ptr<Matrix<RationalCiphertext<Element>>> result(new Matrix<RationalCiphertext<Element>>(product));
    return result;
}
/**
 * EvalNegate - homomorphic negation of a ciphertext.
 * @param ct input ciphertext
 * @return new ciphertext -ct
 */
Ciphertext<Element>
EvalNegate(ConstCiphertext<Element> ct) const
{
    if (ct == NULL || Mismatched(ct->GetCryptoContext()))
        throw std::logic_error("Information passed to EvalNegate was not generated with this crypto context");

    TimeVar timer;
    if (doTiming)
        TIC(timer);

    auto negated = GetEncryptionAlgorithm()->EvalNegate(ct);

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalNeg, TOC_US(timer)));
    return negated;
}
/**
 * EvalNegateMatrix - negates every entry of a matrix of rational ciphertexts.
 * @param ct input matrix
 * @return new matrix holding -ct, entry by entry
 */
shared_ptr<Matrix<RationalCiphertext<Element>>>
EvalNegateMatrix(const shared_ptr<Matrix<RationalCiphertext<Element>>> ct) const
{
    if (ct == NULL || Mismatched((*ct)(0,0).GetCryptoContext()))
        throw std::logic_error("Information passed to EvalNegateMatrix was not generated with this crypto context");

    TimeVar timer;
    if (doTiming)
        TIC(timer);

    shared_ptr<Matrix<RationalCiphertext<Element>>> negated(
        new Matrix<RationalCiphertext<Element>>(ct->GetAllocator(), ct->GetRows(), ct->GetCols()));
    for (size_t row = 0; row < negated->GetRows(); ++row) {
        for (size_t col = 0; col < negated->GetCols(); ++col) {
            (*negated)(row, col) = -((*ct)(row, col));
        }
    }

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalNegMatrix, TOC_US(timer)));
    return negated;
}
/**
 * EvalAutomorphismKeyGen - generates automorphism keys for a given key pair.
 * @param publicKey original public key
 * @param origPrivateKey original private key
 * @param indexList list of automorphism indices to be computed
 * @return the evaluation keys; index 0 of the vector corresponds to
 *         plaintext index 2, index 1 to plaintext index 3, etc.
 */
shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(const LPPublicKey<Element> publicKey,
    const LPPrivateKey<Element> origPrivateKey, const std::vector<usint> &indexList) const {
    // guard clauses: both keys present and created by this exact context
    if (publicKey == NULL || origPrivateKey == NULL)
        PALISADE_THROW(type_error, "Null Keys");
    if (publicKey->GetCryptoContext().get() != this)
        PALISADE_THROW(type_error, "Key was not created in this CryptoContextImpl");
    if (publicKey->GetCryptoContext() != origPrivateKey->GetCryptoContext())
        PALISADE_THROW(type_error, "Keys were not created in the same CryptoContextImpl");

    TimeVar timer;
    if (doTiming)
        TIC(timer);

    auto keyMap = GetEncryptionAlgorithm()->EvalAutomorphismKeyGen(publicKey, origPrivateKey, indexList);

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalAutomorphismKeyGen, TOC_US(timer)));
    return keyMap;
}
/**
 * EvalAutomorphism - evaluates the automorphism of a ciphertext at index i.
 * @param ciphertext the input ciphertext
 * @param i automorphism index
 * @param evalKeys map of evaluation keys generated by EvalAutomorphismKeyGen
 * @return resulting ciphertext
 */
Ciphertext<Element> EvalAutomorphism(ConstCiphertext<Element> ciphertext, usint i,
    const std::map<usint, LPEvalKey<Element>> &evalKeys) const {
    // use the first key in the map as a representative for validation
    auto firstEntry = evalKeys.begin();
    if (firstEntry == evalKeys.end())
        PALISADE_THROW(type_error, "Empty key map");

    auto representativeKey = firstEntry->second;
    if (ciphertext == NULL || representativeKey == NULL)
        PALISADE_THROW(type_error, "Null inputs");
    if (ciphertext->GetCryptoContext().get() != this)
        PALISADE_THROW(type_error, "Ciphertext was not created in this CryptoContextImpl");
    if (ciphertext->GetCryptoContext() != representativeKey->GetCryptoContext())
        PALISADE_THROW(type_error, "Items were not created in the same CryptoContextImpl");
    if (ciphertext->GetKeyTag() != representativeKey->GetKeyTag())
        PALISADE_THROW(type_error, "Items were not encrypted with same keys");

    TimeVar timer;
    if (doTiming)
        TIC(timer);

    auto rotated = GetEncryptionAlgorithm()->EvalAutomorphism(ciphertext, i, evalKeys);

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalAutomorphismI, TOC_US(timer)));
    return rotated;
}
/**
 * EvalAutomorphismKeyGen - generates automorphism keys for a given private
 * key; uses the private key for encryption.
 * @param privateKey private key
 * @param indexList list of automorphism indices to be computed
 * @return the evaluation keys
 */
shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(const LPPrivateKey<Element> privateKey,
    const std::vector<usint> &indexList) const {
    if (privateKey == NULL)
        PALISADE_THROW(type_error, "Null input");
    if (privateKey->GetCryptoContext().get() != this)
        PALISADE_THROW(type_error, "Key was not created in this CryptoContextImpl");

    TimeVar timer;
    if (doTiming)
        TIC(timer);

    auto keyMap = GetEncryptionAlgorithm()->EvalAutomorphismKeyGen(privateKey, indexList);

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpEvalAutomorphismK, TOC_US(timer)));
    return keyMap;
}
/**
* EvalSumKeyGen Generates the key map to be used by evalsum
*
* @param privateKey private key.
* @param publicKey public key (used in NTRU schemes).
*/
void EvalSumKeyGen(
const LPPrivateKey<Element> privateKey,
const LPPublicKey<Element> publicKey = nullptr);
// Key-map generators for the row-wise / column-wise summation variants
// (EvalSumRows / EvalSumCols). NOTE(review): rowSize = 0 appears to be a
// "use default" sentinel -- confirm against the implementation.
shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalSumRowsKeyGen(
const LPPrivateKey<Element> privateKey,
const LPPublicKey<Element> publicKey = nullptr, usint rowSize = 0);
shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalSumColsKeyGen(
const LPPrivateKey<Element> privateKey,
const LPPublicKey<Element> publicKey = nullptr);
/**
* GetEvalSumKeyMap returns the EvalSum key map for the given key id
*
* @param id key tag identifying which key set to return
* @return the EvalSum key map
*/
static const std::map<usint, LPEvalKey<Element>>& GetEvalSumKeyMap(const string& id);
// Returns all known EvalSum key maps, indexed by key id.
static const std::map<string,shared_ptr<std::map<usint, LPEvalKey<Element>>>>& GetAllEvalSumKeys();
/**
* Function for evaluating a sum of all components
*
* @param ciphertext the input ciphertext.
* @param batchSize size of the batch
* @return resulting ciphertext
*/
Ciphertext<Element> EvalSum(ConstCiphertext<Element> ciphertext, usint batchSize) const;
// Row-wise / column-wise summation over a packed matrix; both take the key
// map produced by the matching EvalSumRowsKeyGen / EvalSumColsKeyGen call.
Ciphertext<Element> EvalSumRows(ConstCiphertext<Element> ciphertext, usint rowSize, const std::map<usint, LPEvalKey<Element>> &evalKeys) const;
Ciphertext<Element> EvalSumCols(ConstCiphertext<Element> ciphertext, usint rowSize, const std::map<usint, LPEvalKey<Element>> &evalKeys) const;
/**
* EvalAtIndexKeyGen generates the rotation keys used by EvalAtIndex
* (the previous header wrongly described EvalSumKeyGen)
*
* @param privateKey private key.
* @param indexList list of rotation indices.
* @param publicKey public key (used in NTRU schemes).
*/
void EvalAtIndexKeyGen(const LPPrivateKey<Element> privateKey,
const std::vector<int32_t> &indexList, const LPPublicKey<Element> publicKey = nullptr);
/**
 * EvalFastRotationPrecompute implements the precomputation (digit
 * decomposition) step of hoisted automorphisms.
 *
 * See Section 5 of Halevi and Shoup, "Faster Homomorphic linear
 * transformations in HELib" (https://eprint.iacr.org/2018/244).
 * An automorphism normally takes three steps: (1) apply the automorphism to
 * the ciphertext, (2) decompose the automorphed values into digits, and
 * (3) key switch so further computation is possible. Hoisting instead
 * performs the digit decomposition on the ORIGINAL ciphertext first; since
 * the decomposition is independent of the rotation index, it can be reused
 * across many rotations of the same ciphertext. This greatly helps when
 * computing many automorphisms on one ciphertext, e.g. in EvalPermute.
 *
 * @param ct the input ciphertext on which to do the precomputation
 * @return the digit decomposition, to be passed to EvalFastRotation
 */
shared_ptr<vector<Element>> EvalFastRotationPrecompute(
    ConstCiphertext<Element> ct
) const {
    TimeVar timer;
    if (doTiming)
        TIC(timer);

    auto digits = GetEncryptionAlgorithm()->EvalFastRotationPrecompute(ct);

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpFastRotPrecomp, TOC_US(timer)));
    return digits;
}
/**
 * EvalFastRotation implements the automorphism and key switching step of
 * hoisted automorphisms.
 *
 * See Section 5 of Halevi and Shoup, "Faster Homomorphic linear
 * transformations in HELib" (https://eprint.iacr.org/2018/244).
 * Hoisting performs the digit decomposition on the original ciphertext
 * first (EvalFastRotationPrecompute), then applies the automorphism and key
 * switching on the decomposed digits; since the decomposition is
 * rotation-index-independent, it can be reused across many rotations of the
 * same ciphertext, e.g. in EvalPermute.
 *
 * This method assumes all required rotation keys exist. That may not hold
 * with baby-step/giant-step key switching; see Section 5.1 of the reference
 * and EvalPermuteBGStepHoisted for how to handle that case.
 *
 * @param ct the input ciphertext to perform the automorphism on
 * @param index the rotation index; positive means left rotation,
 *        negative means right rotation
 * @param m the cyclotomic order
 * @param digits the digit decomposition created by
 *        EvalFastRotationPrecompute
 * @return the rotated ciphertext
 */
Ciphertext<Element> EvalFastRotation(
    ConstCiphertext<Element> ct,
    const usint index,
    const usint m,
    const shared_ptr<vector<Element>> digits
) const {
    TimeVar timer;
    if (doTiming)
        TIC(timer);

    auto rotated = GetEncryptionAlgorithm()->EvalFastRotation(ct, index, m, digits);

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpFastRot, TOC_US(timer)));
    return rotated;
}
/**
* Merges multiple ciphertexts with encrypted results in slot 0 into a single ciphertext
* The slot assignment is done based on the order of ciphertexts in the vector.
* NOTE(review): the old comment documented an &evalKeys parameter that is not
* part of this signature; the automorphism keys are presumably resolved
* internally by key tag -- confirm against the implementation.
*
* @param ciphertextVector vector of ciphertexts to be merged.
* @return resulting ciphertext
*/
Ciphertext<Element> EvalMerge(const vector<Ciphertext<Element>> &ciphertextVector) const;
/**
* GetEvalAutomorphismKeyMap returns the EvalAutomorphism key map for the given key id
*
* @param id key tag identifying which key set to return
* @return the EvalAutomorphism key map
*/
static const std::map<usint, LPEvalKey<Element>>& GetEvalAutomorphismKeyMap(const string& id);
// Returns all known EvalAutomorphism key maps, indexed by key id.
static const std::map<string,shared_ptr<std::map<usint, LPEvalKey<Element>>>>& GetAllEvalAutomorphismKeys();
/**
* Moves index-th slot to slot 0
*
* @param ciphertext the input ciphertext.
* @param index the slot index to move to slot 0.
* @return resulting ciphertext
*/
Ciphertext<Element> EvalAtIndex(ConstCiphertext<Element> ciphertext, int32_t index) const;
/**
* Evaluates inner product of two encrypted vectors in batched encoding
*
* @param ciphertext1 first vector.
* @param ciphertext2 second vector.
* @param batchSize size of the batch to be summed up
* @return resulting ciphertext
*/
Ciphertext<Element> EvalInnerProduct(ConstCiphertext<Element> ciphertext1, ConstCiphertext<Element> ciphertext2, usint batchSize) const;
/**
* Evaluates inner product in batched encoding
*
* @param ciphertext1 first vector (encrypted).
* @param ciphertext2 second vector, given as a plaintext
*        (NOTE(review): the parameter is a ConstPlaintext despite its name)
* @param batchSize size of the batch to be summed up
* @return resulting ciphertext
*/
Ciphertext<Element> EvalInnerProduct(ConstCiphertext<Element> ciphertext1, ConstPlaintext ciphertext2, usint batchSize) const;
/**
* EvalCrossCorrelation - Computes the sliding sum of inner products (known
* as cross-correlation, sliding inner product, or sliding dot product in
* image processing)
* @param x - first vector of row vectors
* @param y - second vector of row vectors
* @param batchSize - batch size for packed encoding
* @param indexStart - starting index in the vectors of row vectors
* @param length - length of the slice in the vectors of row vectors; default is 0 meaning to use the full length of the vector
* @return sum(x_i*y_i), i.e., a sum of inner products
*/
Ciphertext<Element>
EvalCrossCorrelation(const shared_ptr<Matrix<RationalCiphertext<Element>>> x,
const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize,
usint indexStart = 0, usint length = 0) const;
/**
* EvalLinRegressBatched - Computes the parameter vector for linear regression using the least squares method
* Supported only in batched mode; currently works only for two regressors
* @param x - matrix of regressors
* @param y - vector of dependent variables
* @param batchSize - batch size for the packed encoding
* @return the parameter vector using (x^T x)^{-1} x^T y (using least squares method)
*/
shared_ptr<Matrix<RationalCiphertext<Element>>>
EvalLinRegressBatched(const shared_ptr<Matrix<RationalCiphertext<Element>>> x,
const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize) const;
/**
 * EvalLinRegression - computes the parameter vector for linear regression
 * using the least squares method: (x^T x)^{-1} x^T y.
 * @param x matrix of regressors
 * @param y vector of dependent variables
 * @return the parameter vector
 */
shared_ptr<Matrix<RationalCiphertext<Element>>>
EvalLinRegression(const shared_ptr<Matrix<RationalCiphertext<Element>>> x,
    const shared_ptr<Matrix<RationalCiphertext<Element>>> y) const
{
    TypeCheck((*x)(0,0), (*y)(0,0)); // TODO only checking one; when Matrix is refactored, this should be revisited

    TimeVar timer;
    if (doTiming)
        TIC(timer);

    auto parameters = GetEncryptionAlgorithm()->EvalLinRegression(x, y);

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpLinRegression, TOC_US(timer)));
    return parameters;
}
/**
 * KeySwitch - PALISADE key switching method.
 * @param keySwitchHint the key switch hint to apply
 * @param ciphertext the ciphertext to switch
 * @return new CiphertextImpl after applying the key switch
 */
Ciphertext<Element> KeySwitch(
    const LPEvalKey<Element> keySwitchHint,
    ConstCiphertext<Element> ciphertext) const
{
    if (keySwitchHint == NULL || Mismatched(keySwitchHint->GetCryptoContext()))
        throw std::logic_error("Key passed to KeySwitch was not generated with this crypto context");
    if (ciphertext == NULL || Mismatched(ciphertext->GetCryptoContext()))
        throw std::logic_error("Ciphertext passed to KeySwitch was not generated with this crypto context");

    TimeVar timer;
    if (doTiming)
        TIC(timer);

    auto switched = GetEncryptionAlgorithm()->KeySwitch(keySwitchHint, ciphertext);

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpKeySwitch, TOC_US(timer)));
    return switched;
}
/**
 * Rescale - an alias for the PALISADE ModReduce method, since ModReduce is
 * called Rescale in CKKS.
 * @param ciphertext input ciphertext
 * @return mod reduced (rescaled) ciphertext
 */
Ciphertext<Element> Rescale(ConstCiphertext<Element> ciphertext) const {
    if (ciphertext == NULL || Mismatched(ciphertext->GetCryptoContext()))
        throw std::logic_error("Information passed to Rescale was not generated with this crypto context");

    TimeVar timer;
    if (doTiming)
        TIC(timer);

    auto rescaled = GetEncryptionAlgorithm()->ModReduce(ciphertext);

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpModReduce, TOC_US(timer)));
    return rescaled;
}
/**
 * ModReduce - PALISADE modulus reduction method.
 * @param ciphertext input ciphertext
 * @return mod reduced ciphertext
 */
Ciphertext<Element> ModReduce(ConstCiphertext<Element> ciphertext) const {
    if (ciphertext == NULL || Mismatched(ciphertext->GetCryptoContext()))
        throw std::logic_error("Information passed to ModReduce was not generated with this crypto context");

    TimeVar timer;
    if (doTiming)
        TIC(timer);

    auto reduced = GetEncryptionAlgorithm()->ModReduce(ciphertext);

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpModReduce, TOC_US(timer)));
    return reduced;
}
/**
 * ModReduceRational - applies ModReduce to both the numerator and the
 * denominator of a rational ciphertext.
 * @param ciphertext input rational ciphertext
 * @return mod reduced rational ciphertext
 */
RationalCiphertext<Element> ModReduceRational(RationalCiphertext<Element> ciphertext) const {
    TimeVar timer;
    if (doTiming)
        TIC(timer);

    Ciphertext<Element> numerator = GetEncryptionAlgorithm()->ModReduce(ciphertext.GetNumerator());
    Ciphertext<Element> denominator = GetEncryptionAlgorithm()->ModReduce(ciphertext.GetDenominator());

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpModReduce, TOC_US(timer)));
    return RationalCiphertext<Element>(numerator, denominator);
}
/**
 * ModReduceMatrix - applies ModReduceRational to every entry of a matrix of
 * rational ciphertexts.
 * @param ciphertext input matrix
 * @return new matrix of mod reduced entries
 */
shared_ptr<Matrix<RationalCiphertext<Element>>> ModReduceMatrix(shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext) const {
    // needs context check
    TimeVar timer;
    if (doTiming)
        TIC(timer);

    shared_ptr<Matrix<RationalCiphertext<Element>>> reduced(
        new Matrix<RationalCiphertext<Element>>(ciphertext->GetAllocator(), ciphertext->GetRows(), ciphertext->GetCols()));
    for (size_t row = 0; row < reduced->GetRows(); ++row) {
        for (size_t col = 0; col < reduced->GetCols(); ++col) {
            (*reduced)(row, col) = ModReduceRational((*ciphertext)(row, col));
        }
    }

    if (doTiming)
        timeSamples->push_back(TimingInfo(OpModReduceMatrix, TOC_US(timer)));
    return reduced;
}
/**
 * LevelReduce - PALISADE LevelReduce method
 * @param cipherText1 input ciphertext
 * @param linearKeySwitchHint key switch hint used by the level reduction
 * @param levels number of levels to drop (default 1)
 * @return level reduced ciphertext
 */
Ciphertext<Element> LevelReduce(ConstCiphertext<Element> cipherText1,
    const LPEvalKeyNTRU<Element> linearKeySwitchHint, size_t levels = 1) const {
    // Validate BEFORE any dereference: the previous version fetched
    // cipherText1->GetCryptoParameters() (into an unused local) ahead of the
    // NULL check, dereferencing a potentially null pointer.
    if( cipherText1 == NULL ||
        Mismatched(cipherText1->GetCryptoContext()) ) {
        throw std::logic_error("Information passed to LevelReduce was not generated with this crypto context");
    }
    TimeVar t;
    if( doTiming ) TIC(t);
    auto rv = GetEncryptionAlgorithm()->LevelReduce(cipherText1, linearKeySwitchHint, levels);
    if( doTiming ) {
        timeSamples->push_back( TimingInfo(OpLevelReduce, TOC_US(t)) );
    }
    return rv;
}
/**
 * ComposedEvalMult - PALISADE composed evalmult
 * @param ciphertext1 - first ciphertext
 * @param ciphertext2 - second ciphertext
 * @return the resulting ciphertext (multiplication composed with the first
 *         registered eval-mult key for the ciphertexts' key tag)
 * @throws std::logic_error if either input is null, the key tags differ,
 *         or the ciphertexts were generated by a different crypto context
 */
Ciphertext<Element> ComposedEvalMult(
		ConstCiphertext<Element> ciphertext1,
		ConstCiphertext<Element> ciphertext2) const
{
	// short-circuit ordering guarantees nulls are rejected before any dereference
	bool mismatch = ciphertext1 == NULL || ciphertext2 == NULL ||
		ciphertext1->GetKeyTag() != ciphertext2->GetKeyTag() ||
		Mismatched(ciphertext1->GetCryptoContext());
	if( mismatch )
		throw std::logic_error("Ciphertexts passed to ComposedEvalMult were not generated with this crypto context");
	auto evalKeys = GetEvalMultKeyVector(ciphertext1->GetKeyTag());
	TimeVar t;
	if( doTiming ) TIC(t);
	auto result = GetEncryptionAlgorithm()->ComposedEvalMult(ciphertext1, ciphertext2, evalKeys[0]);
	if( doTiming ) {
		timeSamples->push_back( TimingInfo(OpComposedEvalMult, TOC_US(t)) );
	}
	return result;
}
// Legacy deserialization entry points, retained only for source compatibility.
// Each is marked deprecated because the serialization format changed (see wiki).
static LPPublicKey<Element> deserializePublicKey(const Serialized& serObj) __attribute__ ((deprecated("serialization changed, see wiki for details")));
static LPPrivateKey<Element> deserializeSecretKey(const Serialized& serObj) __attribute__ ((deprecated("serialization changed, see wiki for details")));
static LPEvalKey<Element> deserializeEvalKey(const Serialized& serObj) __attribute__ ((deprecated("serialization changed, see wiki for details")));
static LPEvalKey<Element> deserializeEvalKeyInContext(const Serialized& serObj, CryptoContext<Element> cc) __attribute__ ((deprecated("serialization changed, see wiki for details")));
/**
 * cereal save routine: writes the context parameters ("cc"), the
 * encryption scheme ("kt"), and the scheme id ("si"), in that order.
 * The load routine below reads the same fields in the same order.
 * @param ar - archive to serialize into
 * @param version - object version being written
 */
template <class Archive>
void save( Archive & ar, std::uint32_t const version ) const
{
ar( cereal::make_nvp("cc", params) );
ar( cereal::make_nvp("kt", scheme) );
ar( cereal::make_nvp("si", m_schemeId) );
}
/**
 * cereal load routine: rejects archives written by a later library version,
 * then reads the same three fields written by save(), in the same order.
 * @param ar - archive to deserialize from
 * @param version - object version found in the archive
 * @throws deserialize_error if version is newer than SerializedVersion()
 */
template <class Archive>
void load( Archive & ar, std::uint32_t const version )
{
if( version > SerializedVersion() ) {
PALISADE_THROW(deserialize_error, "serialized object version " + std::to_string(version) + " is from a later version of the library");
}
ar( cereal::make_nvp("cc", params) );
ar( cereal::make_nvp("kt", scheme) );
ar( cereal::make_nvp("si", m_schemeId) );
// NOTE: a pointer to this object will be wrapped in a shared_ptr, and is a "CryptoContext".
// PALISADE relies on the notion that identical CryptoContextImpls are not duplicated in memory
// Once we deserialize this object, we must check to see if there is a matching object
// for this object that's already existing in memory
// if it DOES exist, use it. If it does NOT exist, add this to the cache of all contexts
}
// Name under which this object type appears in serialized archives.
virtual std::string SerializedObjectName() const { return "CryptoContext"; }
// Current serialization format version; load() rejects anything newer.
static uint32_t SerializedVersion() { return 1; }
};
/**
 * @brief CryptoObject
 *
 * A class to aid in referring to the crypto context that an object belongs to
 */
template<typename Element>
class CryptoObject {
protected:
	CryptoContext<Element> context; /*!< crypto context this object belongs to */
	string keyTag; /*!< tag used to find the evaluation key needed for SHE/FHE operations */

public:
	CryptoObject(CryptoContext<Element> cc = 0, const string& tag = "") : context(cc), keyTag(tag) {}

	CryptoObject(const CryptoObject& rhs) {
		context = rhs.context;
		keyTag = rhs.keyTag;
	}

	// BUG FIX: the move constructor previously took "const CryptoObject&&";
	// std::move of a const member degrades to a copy, so moves silently
	// copied. A non-const rvalue reference enables a genuine move; const
	// rvalues now fall through to the copy constructor, which is correct.
	CryptoObject(CryptoObject&& rhs) {
		context = std::move(rhs.context);
		keyTag = std::move(rhs.keyTag);
	}

	virtual ~CryptoObject() {}

	const CryptoObject& operator=(const CryptoObject& rhs) {
		this->context = rhs.context;
		this->keyTag = rhs.keyTag;
		return *this;
	}

	// Move assignment: non-const rvalue reference for the same reason as the
	// move constructor above (a const&& parameter cannot actually be moved from).
	const CryptoObject& operator=(CryptoObject&& rhs) {
		this->context = std::move(rhs.context);
		this->keyTag = std::move(rhs.keyTag);
		return *this;
	}

	// Equal iff both refer to the SAME context instance (pointer identity,
	// not deep equality) and carry the same key tag.
	bool operator==(const CryptoObject& rhs) const {
		return context.get() == rhs.context.get() &&
			keyTag == rhs.keyTag;
	}

	CryptoContext<Element> GetCryptoContext() const { return context; }

	const shared_ptr<LPCryptoParameters<Element>> GetCryptoParameters() const { return context->GetCryptoParameters(); }

	const EncodingParams GetEncodingParameters() const { return context->GetCryptoParameters()->GetEncodingParams(); }

	const string GetKeyTag() const { return keyTag; }

	void SetKeyTag(const string& tag) { keyTag = tag; }

	/**
	 * cereal save routine: writes the owning context ("cc") and key tag ("kt").
	 */
	template <class Archive>
	void save( Archive & ar, std::uint32_t const version ) const
	{
		ar( ::cereal::make_nvp("cc", context) );
		ar( ::cereal::make_nvp("kt", keyTag) );
	}

	/**
	 * cereal load routine: rejects archives from a later library version,
	 * reads the fields written by save(), then re-resolves the context
	 * through the factory cache so identical contexts are not duplicated
	 * in memory.
	 * @throws deserialize_error if version is newer than SerializedVersion()
	 */
	template <class Archive>
	void load( Archive & ar, std::uint32_t const version )
	{
		if( version > SerializedVersion() ) {
			PALISADE_THROW(deserialize_error, "serialized object version " + std::to_string(version) + " is from a later version of the library");
		}
		ar( ::cereal::make_nvp("cc", context) );
		ar( ::cereal::make_nvp("kt", keyTag) );
		context = CryptoContextFactory<Element>::GetContext(context->GetCryptoParameters(),context->GetEncryptionAlgorithm());
	}

	// Name under which this object type appears in serialized archives.
	std::string SerializedObjectName() const { return "CryptoObject"; }

	// Current serialization format version; load() rejects anything newer.
	static uint32_t SerializedVersion() { return 1; }
};
/**
* @brief CryptoContextFactory
*
* A class that contains static methods to generate new crypto contexts from user parameters
*
*/
template<typename Element>
class CryptoContextFactory {
protected:
static vector<CryptoContext<Element>> AllContexts;
public:
static void ReleaseAllContexts();
static int GetContextCount();
static CryptoContext<Element> GetSingleContext();
static CryptoContext<Element> GetContext(
shared_ptr<LPCryptoParameters<Element>> params,
shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme,
const string & schemeId = "Not");
static CryptoContext<Element> GetContextForPointer(CryptoContextImpl<Element>* cc);
static const vector<CryptoContext<Element>>& GetAllContexts();
/**
* construct a PALISADE CryptoContextImpl for the BFV Scheme
* @param params ring parameters
* @param plaintextModulus plaintext modulus
* @param relinWindow bits in the base of digits in key switching/relinearization
* @param stdDev sigma - distribution parameter for error distribution
* @param delta - the plaintext scaling parameter floor(q/t) in BFV
* @param mode - mode for generating secret keys (RLWE vs OPTIMIZED)
* @param bigmodulus - large modulus used in tensoring of homomorphic multiplication
* @param bigrootofunity - root of unity for bigmodulus
* @param depth of supported computation circuit (not used; for future use)
* @param assuranceMeasure alpha - effective bound for gaussians: - sqrt{alpha}*sigma..sqrt{alpha}*sigma
* @param securityLevel - root Hermite factor
* @param bigmodulusarb - additional large modulus for bigmoduls for the case of general (non-power-of-two) cyclotomics
* @param bigrootofunityarb - root of unity for bigmodulusarb
* @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization
* @return new context
*/
static CryptoContext<Element> genCryptoContextBFV(shared_ptr<typename Element::Params> params,
const PlaintextModulus plaintextmodulus,
usint relinWindow, float stDev, const std::string& delta,
MODE mode = RLWE, const std::string& bigmodulus = "0", const std::string& bigrootofunity = "0",
int depth = 0, int assuranceMeasure = 0, float securityLevel = 0,
const std::string& bigmodulusarb = "0", const std::string& bigrootofunityarb = "0", int maxDepth = 2);
/**
* construct a PALISADE CryptoContextImpl for the BFV Scheme
* @param params ring parameters
* @param encodingParams plaintext encoding parameters
* @param relinWindow bits in the base of digits in key switching/relinearization
* @param stdDev sigma - distribution parameter for error distribution
* @param delta - the plaintext scaling parameter floor(q/t) in BFV
* @param mode - mode for generating secret keys (RLWE vs OPTIMIZED)
* @param bigmodulus - large modulus used in tensoring of homomorphic multiplication
* @param bigrootofunity - root of unity for bigmodulus
* @param depth of supported computation circuit (not used; for future use)
* @param assuranceMeasure alpha - effective bound for gaussians: - sqrt{alpha}*sigma..sqrt{alpha}*sigma
* @param securityLevel - root Hermite factor
* @param bigmodulusarb - additional large modulus for bigmoduls for the case of general (non-power-of-two) cyclotomics
* @param bigrootofunityarb - root of unity for bigmodulusarb
* @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization
* @return new context
*/
static CryptoContext<Element> genCryptoContextBFV(shared_ptr<typename Element::Params> params,
EncodingParams encodingParams,
usint relinWindow, float stDev, const std::string& delta,
MODE mode = RLWE, const std::string& bigmodulus = "0", const std::string& bigrootofunity = "0",
int depth = 0, int assuranceMeasure = 0, float securityLevel = 0,
const std::string& bigmodulusarb = "0", const std::string& bigrootofunityarb = "0", int maxDepth = 2);
/**
* construct a PALISADE CryptoContextImpl for the BFV Scheme using the scheme's ParamsGen methods
* @param plaintextModulus plaintext modulus
* @param securityLevel root Hermite factor (lattice security parameter)
* @param relinWindow bits in the base of digits in key switching/relinearization
* @param dist distribution parameter for Gaussian noise generation
* @param numAdds additive depth for homomorphic computations (assumes numMults and numKeySwitches are set to zero)
* @param numMults multiplicative depth for homomorphic computations (assumes numAdds and numKeySwitches are set to zero)
* @param numKeyswitches key-switching depth for homomorphic computations (assumes numAdds and numMults are set to zero)
* @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution])
* @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization
* @param n ring dimension in case the user wants to use a custom ring dimension
* @return new context
*/
static CryptoContext<Element> genCryptoContextBFV(
const PlaintextModulus plaintextModulus, float securityLevel, usint relinWindow, float dist,
unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t n = 0);
/**
* construct a PALISADE CryptoContextImpl for the BFV Scheme using the scheme's ParamsGen methods
* @param plaintextModulus plaintext modulus
* @param securityLevel standard security level
* @param relinWindow bits in the base of digits in key switching/relinearization
* @param dist distribution parameter for Gaussian noise generation
* @param numAdds additive depth for homomorphic computations (assumes numMults and numKeySwitches are set to zero)
* @param numMults multiplicative depth for homomorphic computations (assumes numAdds and numKeySwitches are set to zero)
* @param numKeyswitches key-switching depth for homomorphic computations (assumes numAdds and numMults are set to zero)
* @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution])
* @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization
* @param n ring dimension in case the user wants to use a custom ring dimension
* @return new context
*/
static CryptoContext<Element> genCryptoContextBFV(
const PlaintextModulus plaintextModulus, SecurityLevel securityLevel, usint relinWindow, float dist,
unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t n = 0);
/**
* construct a PALISADE CryptoContextImpl for the BFV Scheme using the scheme's ParamsGen methods
* @param encodingParams plaintext encoding parameters
* @param securityLevel root Hermite factor (lattice security parameter)
* @param dist distribution parameter for Gaussian noise generation
* @param numAdds additive depth for homomorphic computations (assumes numMults and numKeySwitches are set to zero)
* @param numMults multiplicative depth for homomorphic computations (assumes numAdds and numKeySwitches are set to zero)
* @param numKeyswitches key-switching depth for homomorphic computations (assumes numAdds and numMults are set to zero)
* @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution])
* @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization
* @param n ring dimension in case the user wants to use a custom ring dimension
* @return new context
*/
static CryptoContext<Element> genCryptoContextBFV(
EncodingParams encodingParams, float securityLevel, usint relinWindow, float dist,
unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t n = 0);
/**
* construct a PALISADE CryptoContextImpl for the BFV Scheme using the scheme's ParamsGen methods
* @param encodingParams plaintext encoding parameters
* @param securityLevel standard security level
* @param dist distribution parameter for Gaussian noise generation
* @param numAdds additive depth for homomorphic computations (assumes numMults and numKeySwitches are set to zero)
* @param numMults multiplicative depth for homomorphic computations (assumes numAdds and numKeySwitches are set to zero)
* @param numKeyswitches key-switching depth for homomorphic computations (assumes numAdds and numMults are set to zero)
* @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution])
* @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization
* @param n ring dimension in case the user wants to use a custom ring dimension
* @return new context
*/
static CryptoContext<Element> genCryptoContextBFV(
EncodingParams encodingParams, SecurityLevel securityLevel, usint relinWindow, float dist,
unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t n = 0);
/**
* construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the scheme's ParamsGen methods
* @param plaintextModulus plaintext modulus
* @param securityLevel root Hermite factor (lattice security parameter)
* @param dist distribution parameter for Gaussian noise generation
* @param numAdds additive depth for homomorphic computations (assumes numMults and numKeySwitches are set to zero)
* @param numMults multiplicative depth for homomorphic computations (assumes numAdds and numKeySwitches are set to zero)
* @param numKeyswitches key-switching depth for homomorphic computations (assumes numAdds and numMults are set to zero)
* @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution])
* @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization
* @param relinWindow the key switching window (bits in the base for digits) used for digit decomposition (0 - means to use only CRT decomposition)
* @param dcrtBits size of "small" CRT moduli
* @param n ring dimension in case the user wants to use a custom ring dimension
* @return new context
*/
static CryptoContext<Element> genCryptoContextBFVrns(
const PlaintextModulus plaintextModulus, float securityLevel, float dist,
unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2,
uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0);
/**
* construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the scheme's ParamsGen methods
* @param plaintextModulus plaintext modulus
* @param securityLevel standard security level
* @param dist distribution parameter for Gaussian noise generation
* @param numAdds additive depth for homomorphic computations (assumes numMults and numKeySwitches are set to zero)
* @param numMults multiplicative depth for homomorphic computations (assumes numAdds and numKeySwitches are set to zero)
* @param numKeyswitches key-switching depth for homomorphic computations (assumes numAdds and numMults are set to zero)
* @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution])
* @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization
* @param relinWindow the key switching window (bits in the base for digits) used for digit decomposition (0 - means to use only CRT decomposition)
* @param dcrtBits size of "small" CRT moduli
* @param n ring dimension in case the user wants to use a custom ring dimension
* @return new context
*/
static CryptoContext<Element> genCryptoContextBFVrns(
const PlaintextModulus plaintextModulus, SecurityLevel securityLevel, float dist,
unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2,
uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0);
/**
* construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the scheme's ParamsGen methods
* @param encodingParams plaintext encoding parameters
* @param securityLevel root Hermite factor (lattice security parameter)
* @param dist distribution parameter for Gaussian noise generation
* @param numAdds additive depth for homomorphic computations (assumes numMults and numKeySwitches are set to zero)
* @param numMults multiplicative depth for homomorphic computations (assumes numAdds and numKeySwitches are set to zero)
* @param numKeyswitches key-switching depth for homomorphic computations (assumes numAdds and numMults are set to zero)
* @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution])
* @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization
* @param relinWindow the key switching window used for digit decomposition (0 - means to use only CRT decomposition)
* @param dcrtBits size of "small" CRT moduli
* @param n ring dimension in case the user wants to use a custom ring dimension
* @return new context
*/
static CryptoContext<Element> genCryptoContextBFVrns(
EncodingParams encodingParams, float securityLevel, float dist,
unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2,
uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0);
/**
* construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the scheme's ParamsGen methods
* @param encodingParams plaintext encoding parameters
* @param securityLevel standard security level
* @param dist distribution parameter for Gaussian noise generation
* @param numAdds additive depth for homomorphic computations (assumes numMults and numKeySwitches are set to zero)
* @param numMults multiplicative depth for homomorphic computations (assumes numAdds and numKeySwitches are set to zero)
* @param numKeyswitches key-switching depth for homomorphic computations (assumes numAdds and numMults are set to zero)
* @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution])
* @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization
* @param relinWindow the key switching window used for digit decomposition (0 - means to use only CRT decomposition)
* @param dcrtBits size of "small" CRT moduli
* @param n ring dimension in case the user wants to use a custom ring dimension
* @return new context
*/
static CryptoContext<Element> genCryptoContextBFVrns(
EncodingParams encodingParams, SecurityLevel securityLevel, float dist,
unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2,
uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0);
/**
* construct a PALISADE CryptoContextImpl for the BFVrnsB Scheme using the scheme's ParamsGen methods
* @param plaintextModulus plaintext modulus
* @param securityLevel root Hermite factor (lattice security parameter)
* @param dist distribution parameter for Gaussian noise generation
* @param numAdds additive depth for homomorphic computations (assumes numMults and numKeySwitches are set to zero)
* @param numMults multiplicative depth for homomorphic computations (assumes numAdds and numKeySwitches are set to zero)
* @param numKeyswitches key-switching depth for homomorphic computations (assumes numAdds and numMults are set to zero)
* @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution])
* @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization
* @param relinWindow the key switching window used for digit decomposition (0 - means to use only CRT decomposition)
* @param dcrtBits size of "small" CRT moduli
* @param n ring dimension in case the user wants to use a custom ring dimension
* @return new context
*/
static CryptoContext<Element> genCryptoContextBFVrnsB(
const PlaintextModulus plaintextModulus, float securityLevel, float dist,
unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2,
uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0);
/**
* construct a PALISADE CryptoContextImpl for the BFVrnsB Scheme using the scheme's ParamsGen methods
* @param plaintextModulus plaintext modulus
* @param securityLevel standard security level
* @param dist distribution parameter for Gaussian noise generation
* @param numAdds additive depth for homomorphic computations (assumes numMults and numKeySwitches are set to zero)
* @param numMults multiplicative depth for homomorphic computations (assumes numAdds and numKeySwitches are set to zero)
* @param numKeyswitches key-switching depth for homomorphic computations (assumes numAdds and numMults are set to zero)
* @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution])
* @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization
* @param relinWindow the key switching window used for digit decomposition (0 - means to use only CRT decomposition)
* @param dcrtBits size of "small" CRT moduli
* @param n ring dimension in case the user wants to use a custom ring dimension
* @return new context
*/
static CryptoContext<Element> genCryptoContextBFVrnsB(
const PlaintextModulus plaintextModulus, SecurityLevel securityLevel, float dist,
unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2,
uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t = 0);
/**
* construct a PALISADE CryptoContextImpl for the BFVrnsB Scheme using the scheme's ParamsGen methods
* @param encodingParams plaintext encoding parameters
* @param securityLevel root Hermite factor (lattice security parameter)
* @param dist distribution parameter for Gaussian noise generation
* @param numAdds additive depth for homomorphic computations (assumes numMults and numKeySwitches are set to zero)
* @param numMults multiplicative depth for homomorphic computations (assumes numAdds and numKeySwitches are set to zero)
* @param numKeyswitches key-switching depth for homomorphic computations (assumes numAdds and numMults are set to zero)
* @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution])
* @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization
* @param relinWindow the key switching window used for digit decomposition (0 - means to use only CRT decomposition)
* @param dcrtBits size of "small" CRT moduli
* @param n ring dimension in case the user wants to use a custom ring dimension
* @return new context
*/
static CryptoContext<Element> genCryptoContextBFVrnsB(
EncodingParams encodingParams, float securityLevel, float dist,
unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2,
uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0);
/**
* construct a PALISADE CryptoContextImpl for the BFVrnsB Scheme using the scheme's ParamsGen methods
* @param encodingParams plaintext encoding parameters
* @param securityLevel standard security level
* @param dist distribution parameter for Gaussian noise generation
* @param numAdds additive depth for homomorphic computations (assumes numMults and numKeySwitches are set to zero)
* @param numMults multiplicative depth for homomorphic computations (assumes numAdds and numKeySwitches are set to zero)
* @param numKeyswitches key-switching depth for homomorphic computations (assumes numAdds and numMults are set to zero)
* @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution])
* @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization
* @param relinWindow the key switching window used for digit decomposition (0 - means to use only CRT decomposition)
* @param dcrtBits size of "small" CRT moduli
* @param n ring dimension in case the user wants to use a custom ring dimension
* @return new context
*/
static CryptoContext<Element> genCryptoContextBFVrnsB(
EncodingParams encodingParams, SecurityLevel securityLevel, float dist,
unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2,
uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0);
/**
* construct a PALISADE CryptoContextImpl for the BGV Scheme
* @param params ring parameters
* @param plaintextModulus plaintext modulus
* @param relinWindow bits in the base of digits in key switching/relinearization
* @param stdDev sigma - distribution parameter for error distribution
* @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution])
* @param depth of supported computation circuit (not used; for future use)
* @return new context
*/
static CryptoContext<Element> genCryptoContextBGV(shared_ptr<typename Element::Params> params,
const PlaintextModulus plaintextmodulus,
usint relinWindow, float stDev,
MODE mode = RLWE, int depth = 1);
/**
* construct a PALISADE CryptoContextImpl for the BGV Scheme
* @param params ring parameters
* @param encodingParams plaintext encoding parameters
* @param relinWindow bits in the base of digits in key switching/relinearization
* @param stdDev sigma - distribution parameter for error distribution
* @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution])
* @param depth of supported computation circuit (not used; for future use)
* @return new context
*/
static CryptoContext<Element> genCryptoContextBGV(shared_ptr<typename Element::Params> params,
EncodingParams encodingParams,
usint relinWindow, float stDev,
MODE mode = RLWE, int depth = 1);
/**
* construct a PALISADE CryptoContextImpl for the CKKS Scheme
* @param plaintextmodulus
* @param ringdim
* @param modulus
* @param rootOfUnity
* @param relinWindow
* @param stDev
* @param mode
* @param depth
* @param maxDepth maximum depth of multiplications without
* relinearization to support
* @param ksTech key switching technique to use (e.g., GHS or BV)
* @param rsTech rescaling technique to use (e.g., APPROXRESCALE or EXACTRESCALE)
* @return new context
*/
static CryptoContext<Element> genCryptoContextCKKS(shared_ptr<typename Element::Params> params,
const PlaintextModulus plaintextmodulus,
usint relinWindow, float stDev,
MODE mode = RLWE, int depth = 1, int maxDepth = 1,
KeySwitchTechnique ksTech = BV,
RescalingTechnique rsTech = APPROXRESCALE);
/**
* construct a PALISADE CryptoContextImpl for the CKKS Scheme
* @param encodingParams
* @param ringdim
* @param modulus
* @param rootOfUnity
* @param relinWindow
* @param stDev
* @param mode
* @param maxDepth is the maximum homomorphic multiplication depth
* before performing relinearization
* @param ksTech key switching technique to use (e.g., GHS or BV)
* @param rsTech rescaling technique to use (e.g., APPROXRESCALE or EXACTRESCALE)
* @return new context
*/
static CryptoContext<Element> genCryptoContextCKKS(shared_ptr<typename Element::Params> params,
EncodingParams encodingParams,
usint relinWindow, float stDev,
MODE mode = RLWE, int depth = 1, int maxDepth = 1,
enum KeySwitchTechnique ksTech = BV,
RescalingTechnique rsTech = APPROXRESCALE);
/**
* Automatically generate the moduli chain and construct a PALISADE
* CryptoContextImpl for the CKKS Scheme with it.
*
* @param cyclOrder the cyclotomic order M
* @param numPrimes the number of towers/primes to use when building the moduli chain
* @param scaleExp the plaintext scaling factor, which is equal to dcrtBits in our implementation of CKKS
* @param batchSize the batch size of the ciphertext
* @param mode RLWE or OPTIMIZED
* @param depth
* @param maxDepth is the maximum homomorphic multiplication depth before performing relinearization
* @param firstModSize the bit-length of the first modulus
* @param ksTech key switching technique to use (e.g., GHS or BV)
* @param rsTech rescaling technique to use (e.g., APPROXRESCALE or EXACTRESCALE)
* @param numLargeDigits the number of big digits to use in HYBRID key switching
* @return new context
*/
static CryptoContext<Element> genCryptoContextCKKSWithParamsGen(
usint cyclOrder,
usint numPrimes,
usint scaleExp,
usint relinWindow,
usint batchSize,
MODE mode,
int depth = 1,
int maxDepth = 1,
usint firstModSize = 60,
enum KeySwitchTechnique ksTech = BV,
enum RescalingTechnique rsTech = APPROXRESCALE,
uint32_t numLargeDigits = 4);
/**
 * Construct a PALISADE CryptoContextImpl for the CKKS Scheme.
 *
 * (Parameter documentation listed in signature order.)
 *
 * @param multiplicativeDepth the depth of multiplications supported by the scheme (equal to number of towers - 1)
 * @param scalingFactorBits the size of the scaling factor in bits
 * @param batchSize the number of slots being used in the ciphertext
 * @param stdLevel the standard security level we want the scheme to satisfy
 * @param ringDim the ring dimension (if not specified selected automatically based on stdLevel)
 * @param rsTech rescaling technique to use (e.g., APPROXRESCALE or EXACTRESCALE)
 * @param ksTech key switching technique to use (e.g., HYBRID, GHS or BV)
 * @param numLargeDigits the number of big digits to use in HYBRID key switching
 * @param maxDepth is the maximum homomorphic multiplication depth before performing relinearization
 * @param firstModSize the bit-length of the first modulus
 * @param relinWindow the relinearization windows (used in BV key switching, use 0 for RNS decomposition)
 * @param mode RLWE (gaussian distribution) or OPTIMIZED (ternary distribution)
 * @return new context
 */
static CryptoContext<Element> genCryptoContextCKKS(
    usint multiplicativeDepth,
    usint scalingFactorBits,
    usint batchSize,
    SecurityLevel stdLevel = HEStd_128_classic,
    usint ringDim = 0,
    enum RescalingTechnique rsTech = EXACTRESCALE,
    enum KeySwitchTechnique ksTech = HYBRID,
    uint32_t numLargeDigits = 0,
    int maxDepth = 1,
    usint firstModSize = 60,
    usint relinWindow = 0,
    MODE mode = OPTIMIZED);
/**
 * construct a PALISADE CryptoContextImpl for the StehleSteinfeld Scheme
 * @param params ring parameters
 * @param plaintextmodulus plaintext modulus
 * @param relinWindow bits in the base of digits in key switching/relinearization
 * @param stDev sigma - distribution parameter for error distribution
 * @param stDevStSt distribution parameter for secret key distribution
 * @param depth of supported computation circuit (not used; for future use)
 * @param assuranceMeasure alpha - effective bound for gaussians: - sqrt{alpha}*sigma..sqrt{alpha}*sigma
 * @param securityLevel root Hermite factor
 * @return new context
 */
static CryptoContext<Element> genCryptoContextStehleSteinfeld(shared_ptr<typename Element::Params> params,
    const PlaintextModulus plaintextmodulus,
    usint relinWindow, float stDev, float stDevStSt, int depth = 1, int assuranceMeasure = 9, float securityLevel = 1.006);
/**
 * construct a PALISADE CryptoContextImpl for the StehleSteinfeld Scheme
 * @param params ring parameters
 * @param encodingParams plaintext encoding parameters
 * @param relinWindow bits in the base of digits in key switching/relinearization
 * @param stDev sigma - distribution parameter for error distribution
 * @param stDevStSt distribution parameter for secret key distribution
 * @param depth of supported computation circuit (not used; for future use)
 * @param assuranceMeasure alpha - effective bound for gaussians: - sqrt{alpha}*sigma..sqrt{alpha}*sigma
 * @param securityLevel root Hermite factor
 * @return new context
 */
static CryptoContext<Element> genCryptoContextStehleSteinfeld(shared_ptr<typename Element::Params> params,
    EncodingParams encodingParams,
    usint relinWindow, float stDev, float stDevStSt, int depth = 1, int assuranceMeasure = 9, float securityLevel = 1.006);
/**
 * construct a PALISADE CryptoContextImpl for the Null Scheme
 * @param m cyclotomic order (ring dimension n = m/2 for power-of-two cyclotomics)
 * @param ptModulus plaintext modulus
 * @return new context
 */
static CryptoContext<Element> genCryptoContextNull(unsigned int m, const PlaintextModulus ptModulus);
/**
 * construct a PALISADE CryptoContextImpl for the Null Scheme
 * @param m cyclotomic order (ring dimension n = m/2 for power-of-two cyclotomics)
 * @param encodingParams plaintext encoding parameters
 * @return new context
 */
static CryptoContext<Element> genCryptoContextNull(unsigned int m, EncodingParams encodingParams);
static CryptoContext<Element> DeserializeAndCreateContext(const Serialized& serObj) __attribute__ ((deprecated("serialization changed, see wiki for details")));
};
}
#endif /* SRC_PKE_CRYPTOCONTEXT_H_ */
|
debug_test_system.h | // ==========================================================================
// SeqAn - The Library for Sequence Analysis
// ==========================================================================
// Copyright (c) 2006-2013, Knut Reinert, FU Berlin
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of Knut Reinert or the FU Berlin nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
//
// ==========================================================================
// Author: Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de>
// ==========================================================================
// The SeqAn testing infrastructure. Based on ideas from the OpenMS
// "ClassTest.h".
// ==========================================================================
// TODO(holtgrew): This could use some cleanup.
// SEQAN_NO_GENERATED_FORWARDS
#ifndef SEQAN_CORE_INCLUDE_SEQAN_BASIC_DEBUG_TEST_SYSTEM_H_
#define SEQAN_CORE_INCLUDE_SEQAN_BASIC_DEBUG_TEST_SYSTEM_H_
#include <iostream> // stdout, stderr
#include <iomanip>
#include <cstring> // strrpos
#include <cstdlib> // exit()
#include <cstdio>
#include <cstdarg> // va_start, va_list, va_end
#include <algorithm> // min()
#include <set>
#include <vector>
#include <string>
#ifdef PLATFORM_WINDOWS
#include <Windows.h> // DeleteFile()
#else // #ifdef PLATFORM_WINDOWS
#include <unistd.h> // unlink()
#include <sys/stat.h> // mkdir()
#include <dirent.h> // DIR
#if SEQAN_HAS_EXECINFO
#include <execinfo.h> // backtrace(), backtrace_symbols()
#endif // #if SEQAN_HAS_EXECINFO
#include <cxxabi.h> // __cxa_demangle()
#include <signal.h>
#endif // #ifdef PLATFORM_WINDOWS
/**
.Macro.SEQAN_FAIL
..cat:Assertions
..summary:Force abortion of program, regardless of debugging settings.
..signature:SEQAN_FAIL(msg[, args])
..param.msg:A format string.
..param.args:An optional list of arguments.
..remarks:Use this if something really unexpected happens inside your functions and there is no way to report this through the API. A good example would be logic errors, e.g. invalid values.
..example.text:In the following example, the $SEQAN_FAIL$ is there if a possible value is added to $MyEnum$ but the function $foo$ is not updated accordingly.
..example.code:
enum MyEnum {
VALUE_ONE,
VALUE_TWO
};
bool foo(MyEnum x) {
switch (x) {
case VALUE_ONE:
// do something
return true;
case VALUE_TWO:
// do something
return true;
}
SEQAN_FAIL("Logic error. Should never reach here. x == %d.", x);
return false;
}
..include:seqan/basic.h
..see:Macro.SEQAN_CHECK
*/
// Report an unconditional failure for the current file/line via
// ClassTest::forceFail() (printf-style message) and then call
// ClassTest::fail().  NOTE: at least one argument (the format string) is
// required after the macro name, since __VA_ARGS__ must not be empty
// pre-C++20.
#define SEQAN_FAIL(...) \
    do { \
        ::seqan::ClassTest::forceFail(__FILE__, __LINE__, \
                                      __VA_ARGS__); \
        ::seqan::ClassTest::fail(); \
    } while (false)
/**
.Macro.SEQAN_CHECK
..cat:Assertions
..summary:Force abortion of program if a condition is not met, regardless of debugging settings.
..signature:SEQAN_CHECK(condition, msg[, args])
..param.msg:A format string.
..param.args:An optional list of arguments.
..remarks:Use this if something really unexpected happens inside your functions and there is no way to report this through the API. A good example would be logic errors, e.g. invalid values.
..example.text:In the following example, the $SEQAN_CHECK$ stops program execution if a value is added to $MyEnum$ but the function $foo$ is not updated accordingly.
..example.code:
enum MyEnum {
VALUE_ONE,
VALUE_TWO
};
bool foo(MyEnum x) {
SEQAN_CHECK((x == VALUE_ONE || x == VALUE_TWO), "Invalid value for x == %d.", x);
switch (x) {
case VALUE_ONE:
// do something
return true;
case VALUE_TWO:
// do something
return true;
}
return false; // Should never reach here, checked above with SEQAN_CHECK.
}
..include:seqan/basic.h
..see:Macro.SEQAN_FAIL
*/
// Evaluate _arg1; when it is false, report the failed condition (with the
// printf-style message from __VA_ARGS__) through ClassTest::testTrue() and
// then call ClassTest::fail().  NOTE: requires at least one argument after
// the condition (the message format string), since __VA_ARGS__ must not be
// empty pre-C++20.
#define SEQAN_CHECK(_arg1, ...) \
    do { \
        if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \
                                          (_arg1), # _arg1, \
                                          __VA_ARGS__)) { \
            ::seqan::ClassTest::fail(); \
        } \
    } while (false)
// SeqAn's has three global debug/testing levels: testing, debug and
// release. Depending on the level, the SEQAN_ASSERT_* and
// SEQAN_CHECKPOINT macros will be enabled.
//
// Note that this is independent of the <cassert> assertions and
// NDEBUG being defined.
//
// The levels are enabled by the values of the macros
// SEQAN_ENABLE_TESTING and SEQAN_ENABLE_DEBUG. By setting a macro to
// 0, one disables the level and by setting the macro to 1, one
// enables a level. Enabling testing also enables debug, overriding a
// value of 0 for SEQAN_ENABLE_DEBUG.
//
// If the level is release (both the macros for debug and testing are
// 0), the assertions will be disabled. If the level is debug then
// the assertions will be enabled. If the level is testing then the
// checkpoint macros will also be enabled.
//
// The default is to enable debugging but disable testing.
//
// You can print the current level using the function seqan::printDebugLevel().
/**
.Macro.SEQAN_ENABLE_TESTING
..cat:Testing & Debugging
..summary:Indicates whether testing is enabled.
..signature:SEQAN_ENABLE_TESTING
..remarks:When enabled (set to 1), testing is enabled. This means the macros for the tests (@Macro.SEQAN_BEGIN_TESTSUITE@, @Macro.SEQAN_DEFINE_TEST@, @Macro.SEQAN_CALL_TEST@, and @Macro.SEQAN_END_TESTSUITE@) will be enabled. This makes failing assertions raise
exceptions instead of call $abort()$ and enables checkpoints.
..remarks:By default, this is set to 0.
..remarks:If @Macro.SEQAN_ENABLE_CHECKPOINTS@ is not defined before including $<seqan/basic.h>$, then @Macro.SEQAN_ENABLE_CHECKPOINTS@ will be set to the value of @Macro.SEQAN_ENABLE_TESTING@ (after the default initialization to 0).
..remarks:If you want to change this value, you have to define this value before including any SeqAn header.
..remarks:If set to 1 then @Macro.SEQAN_ENABLE_DEBUG@ is force-set to 1 as well.
..see:Macro.SEQAN_ENABLE_DEBUG
..see:Macro.SEQAN_ENABLE_CHECKPOINTS
*/
// Set default for SEQAN_ENABLE_TESTING.
#ifndef SEQAN_ENABLE_TESTING
#define SEQAN_ENABLE_TESTING 0
#endif // #ifndef SEQAN_ENABLE_TESTING
/**
.Macro.SEQAN_ENABLE_DEBUG
..cat:Testing & Debugging
..summary:Indicates whether debugging is enabled.
..signature:SEQAN_ENABLE_DEBUG
..remarks:When enabled (set to 1), debugging is enabled. This means the assertion macros are expanded to actual code and not to nothing.
..remarks:By default, this is set to 0 if $NDEBUG$ is defined and to 1 if $NDEBUG$ is not defined.
..remarks:If you want to change this value, you have to define this value before including any SeqAn header.
..remarks:Force-enabled if @Macro.SEQAN_ENABLE_TESTING@ is set to 1.
..see:Macro.SEQAN_ENABLE_TESTING
..see:Macro.SEQAN_ENABLE_CHECKPOINTS
*/
// Set default for SEQAN_ENABLE_DEBUG.
#ifndef SEQAN_ENABLE_DEBUG
#ifdef NDEBUG
#define SEQAN_ENABLE_DEBUG 0
#else // #ifdef NDEBUG
#define SEQAN_ENABLE_DEBUG 1
#endif // #ifdef NDEBUG
#endif // #ifndef SEQAN_ENABLE_DEBUG
// Force-enable debugging if testing is enabled.
#if SEQAN_ENABLE_TESTING
#undef SEQAN_ENABLE_DEBUG
#define SEQAN_ENABLE_DEBUG 1
#endif // #if SEQAN_ENABLE_TESTING
/**
.Macro.SEQAN_ENABLE_CHECKPOINTS
..cat:Testing & Debugging
..summary:Indicates whether checkpoints are enabled.
..signature:SEQAN_ENABLE_CHECKPOINTS
..remarks:When enabled (set to 1), checkpoints are enabled. This means the $SEQAN_CHECKPOINT$ macros are expanded to actual code and not to nothing.
..remarks:By default, this is set to 0 (checkpoints disabled) unless defined before including any SeqAn header.
..remarks:Checkpoints can come at large increases of running time in your tests. Disable them when your test run too slow.
..remarks:If you want to change this value, you have to define this value before including any SeqAn header.
..example.text:Disable checkpoints in a program.
..example.code:
// Disable SeqAn checkpoints in this program.
#define SEQAN_ENABLE_CHECKPOINTS 0
// Any SeqAn headers or headers including SeqAn headers have to come AFTER the
// definition of SEQAN_ENABLE_CHECKPOINT above.
#include <seqan/base.h>
int main(int argc, char const ** argv)
{
// Any call to SeqAn functions will NOT log any checkpoints.
return 0;
}
..see:Macro.SEQAN_ENABLE_DEBUG
..see:Macro.SEQAN_ENABLE_TESTING
*/
// Allow disabling checkpoints independent of testing.
#ifndef SEQAN_ENABLE_CHECKPOINTS
#define SEQAN_ENABLE_CHECKPOINTS 0 // SEQAN_ENABLE_TESTING
#endif // #ifndef SEQAN_ENABLE_CHECKPOINTS
/**
.Macro.SEQAN_TYPEDEF_FOR_DEBUG
..cat:Testing & Debugging
..summary: When using typedefs that are only used in debug mode then they have to be marked with macro.
..signature:SEQAN_TYPEDEF_FOR_DEBUG
..example.code:
typedef int TInt SEQAN_TYPEDEF_FOR_DEBUG;
*/
#if !SEQAN_ENABLE_DEBUG
# if defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)))
# define SEQAN_TYPEDEF_FOR_DEBUG __attribute__((unused))
# else
# define SEQAN_TYPEDEF_FOR_DEBUG
# endif
#else
# define SEQAN_TYPEDEF_FOR_DEBUG
#endif
// TODO(holtgrew): This one is for profiling and in tests.
#if defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)))
# define SEQAN_UNUSED_TYPEDEF __attribute__((unused))
#else
# define SEQAN_UNUSED_TYPEDEF
#endif
namespace seqan {
// SEQAN_CXX_FLAGS_ contains the compiler flags, SEQAN_CXX_FLAGS is a string
// literal with this value.
#if !defined(SEQAN_CXX_FLAGS_)
#define SEQAN_CXX_FLAGS_ SEQAN_CXX_FLAGS_NOT_SET
#endif // !defined(SEQAN_CXX_FLAGS__)
#define SEQAN_MKSTRING_(str) # str
#define SEQAN_MKSTRING(str) SEQAN_MKSTRING_(str)
#define SEQAN_CXX_FLAGS SEQAN_MKSTRING(SEQAN_CXX_FLAGS_)
//#undef SEQAN_MKSTRING
//#undef SEQAN_MKSTRING_
/**
.Function.printDebugLevel
..cat:Testing & Debugging
..summary:Print the current SeqAn debug level and the compiler flags to the given stream.
..signature:printDebugLevel(stream)
..param.stream:The stream to print to, e.g. $std::cout$.
..include:seqan/basic.h
*/
// Write the compiled-in SeqAn debug/testing configuration (debug, testing
// and checkpoint switches plus the compiler flags string) to `stream`.
template <typename TStream>
void printDebugLevel(TStream & stream)
{
    stream << "SEQAN_ENABLE_DEBUG == " << SEQAN_ENABLE_DEBUG << std::endl
           << "SEQAN_ENABLE_TESTING == " << SEQAN_ENABLE_TESTING << std::endl
           << "SEQAN_ENABLE_CHECKPOINTS == " << SEQAN_ENABLE_CHECKPOINTS << std::endl
           << "SEQAN_CXX_FLAGS == \"" << SEQAN_CXX_FLAGS << "\"" << std::endl;
}
#if defined(PLATFORM_WINDOWS) || !SEQAN_HAS_EXECINFO
template <typename TSize>
void printStackTrace(TSize /*maxFrames*/)
{}
#else
// print a demangled stack backtrace of the caller function
//
// Uses backtrace() / backtrace_symbols() and abi::__cxa_demangle().  The
// text layout of a symbol line differs between platforms, hence the cascade
// of sscanf() patterns below.  Frame 0 (this function itself) is skipped.
template <typename TSize>
void printStackTrace(TSize maxFrames)
{
    void * addrlist[256];
    char temp[4096];
    char addr[20];
    char offset[20];

    size_t size;
    int status;
    char * symname;
    char * demangled;

    std::cerr << std::endl << "stack trace:" << std::endl;

    int addrlist_len = backtrace(addrlist, maxFrames);
    char ** symbollist = backtrace_symbols(addrlist, addrlist_len);
    // Start at 1 to skip the printStackTrace frame itself.
    for (int i = 1; i < addrlist_len; ++i)
    {
        offset[0] = 0;
        addr[0] = 0;
        demangled = NULL;

        // LINUX FORMAT:
        // ./sam2svg [0x473b8c]
        // /lib/libc.so.6 [0x7f40d2526f60]
        // ./sam2svg(_Z2f3v+0x10) [0x47200c]
        // ./sam2svg(_Z2f2v+0xd) [0x472021]
        // ./sam2svg(main+0x1367) [0x4735fc]
        // /lib/libc.so.6(__libc_start_main+0xe6) [0x7f40d25131a6]
        //
        if (3 == sscanf(symbollist[i], "%*[^(](%4095[^+]+%[^)]) %s", temp, offset, addr))
        {
            symname = temp;
            if (NULL != (demangled = abi::__cxa_demangle(temp, NULL, &size, &status)))
            {
                symname = demangled;
            }
        }
        // MAC OS X FORMAT:
        // 1   sam2svg   0x0000000100003a39 _ZN5seqanL28signalHandlerPrintStackTraceEi + 21
        // 2   libSystem.B.dylib   0x00007fff87a6d67a _sigtramp + 26
        // 3   libSystem.B.dylib   0x00007fff87a76df7 tiny_free_do_recirc_to_depot + 980
        // 4   sam2svg   0x00000001000021b9 _Z2f2v + 9
        // 5   sam2svg   0x00000001000034b1 main + 4546
        // 6   sam2svg   0x0000000100002190 start + 52
        else if (3 == sscanf(symbollist[i], "%*d %*s %s %s %*s %s", addr, temp, offset))
        {
            symname = temp;
            if (NULL != (demangled = abi::__cxa_demangle(temp, NULL, &size, &status)))
            {
                symname = demangled;
            }
        }
        // LINUX FORMAT:
        // ./sam2svg [0x473b8c]
        // /lib/libc.so.6 [0x7f40d2526f60]
        else if (2 == sscanf(symbollist[i], "%s %s", temp, addr))
        {
            symname = temp;
        }
        // DEFAULT: no pattern matched, print the raw symbol line.
        else
        {
            symname = symbollist[i];
        }

        std::cerr << std::setw(3) << i - 1;
        std::cerr << std::setw(20) << addr;
        std::cerr << "  " << symname;
        if (offset[0] != 0)
            std::cerr << " + " << offset;
        std::cerr << std::endl;
        // free(NULL) is a no-op, so this is safe on the non-demangled paths.
        free(demangled);
    }
    std::cerr << std::endl;
    // Only the array must be freed according to man page, not the contents.
    free(symbollist);
}
// Signal handler: print a stack trace (up to 20 frames), then reinstall the
// default handler and re-raise the signal via kill() so the process still
// terminates with the original signal semantics (exit status / core dump).
// NOTE(review): iostreams and backtrace() are not async-signal-safe; this is
// tolerated here because the process is about to die anyway.
static void signalHandlerPrintStackTrace(int signum)
{
    std::cerr << std::endl;
    printStackTrace(20);
    signal(signum, SIG_DFL);
    kill(getpid(), signum);
}
// Install signalHandlerPrintStackTrace for signals that typically indicate
// programming errors.  Returns 0 so the call can be used as a static
// initializer (see the SignalHandlersDummy_ mechanism further down).
inline int _deploySignalHandlers()
{
    signal(SIGSEGV, signalHandlerPrintStackTrace);      // segfault
    signal(SIGFPE, signalHandlerPrintStackTrace);       // divide by zero
    // ...
    return 0;
}
#if SEQAN_ENABLE_DEBUG
// automatically deploy signal handlers that output the stack trace on a trap (in debug mode)
template <typename T>
struct SignalHandlersDummy_
{
static const int i;
};
template <typename T>
const int SignalHandlersDummy_<T>::i = _deploySignalHandlers();
namespace {
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-variable"
#endif // ifdef __clang__
volatile int signalHandlersDummy_ = SignalHandlersDummy_<void>::i;
#ifdef __clang__
#pragma clang diagnostic pop
#endif // ifdef __clang__
}
#endif // #if SEQAN_ENABLE_DEBUG
#endif // defined(PLATFORM_WINDOWS) || !SEQAN_HAS_EXECINFO
// Namespace for the testing infrastructure.
//
// This namespace contains the variables and functions that are used
// in the macros below to perform the tests.
namespace ClassTest {
// Raised when an assertion fails in test mode.
struct AssertionFailedException {};
// Container for static global data for the tests.
//
// All state lives in function-local statics behind accessor functions so the
// header can be included from multiple translation units without violating
// the one-definition rule.
struct StaticData
{
    // Number of tests that were run.
    static int & testCount()
    {
        static int result = 0;
        return result;
    }

    // Number of errors that occurred.
    static int & errorCount()
    {
        static int result = 0;
        return result;
    }

    // Number of skipped tests.
    static int & skippedCount()
    {
        static int result = 0;
        return result;
    }

    // Flag whether there was an error in this test.
    static bool & thisTestOk()
    {
        static bool result = false;
        return result;
    }

    // Flag whether this test was skipped.
    static bool & thisTestSkipped()
    {
        static bool result = false;
        return result;
    }

    // Name of the current test.
    static const char * & currentTestName()
    {
        static const char * result = "";
        return result;
    }

    // Base path to the binary.  Extrapolated from __FILE__.
    static char * & basePath()
    {
        // Heap-allocate the default value so the delete[] in endTestSuite()
        // is well-defined even when beginTestSuite() never replaced the
        // pointer.  (The previous code const_cast a string literal here,
        // making that delete[] undefined behavior.)
        static char * result = 0;
        if (result == 0)
        {
            result = new char[2];
            strcpy(result, ".");
        }
        return result;
    }

    // Compute the repository root from __FILE__ (this file resides in
    // core/include/seqan/basic).  Exits the program when "core" cannot be
    // located in the path.
    static char const * _computePathToRoot()
    {
        const char * file = __FILE__;
        size_t fileLen = strlen(file);
        size_t coreLen = strlen("core");
        int pos = -1;
        // Find the LAST occurrence of "core".  Guard fileLen < coreLen:
        // the original unsigned subtraction in the loop bound would wrap
        // around and scan far past the end of the string.
        if (fileLen >= coreLen)
        {
            for (size_t i = 0; i + coreLen <= fileLen; ++i)
            {
                if (strncmp(file + i, "core", coreLen) == 0)
                    pos = i;
            }
        }
        if (pos == -1)
        {
            std::cerr << "Could not extrapolate path to repository from __FILE__ == \""
                      << __FILE__ << "\"" << std::endl;
            exit(1);
        }
        // Walk back to the directory separator preceding "core".
        for (; pos > 0 && *(file + pos - 1) != '/' && *(file + pos - 1) != '\\'; --pos)
            continue;
        static char buffer[1024];
        strncpy(&buffer[0], file, pos);
        // Strip the trailing separator.  Guard pos == 0 ("core" at the very
        // beginning of the path), where the original wrote to buffer[-1].
        buffer[pos > 0 ? pos - 1 : 0] = '\0';
        return &buffer[0];
    }

    // Base path to the directory containing "core" and "extras."
    // Extrapolated from __FILE__.
    static char const * pathToRoot()
    {
        // Cache the computed path.  The original declared `result` as a
        // non-static local, so the path was recomputed on every call.
        static const char * result = 0;
        if (!result)
            result = _computePathToRoot();
        return result;
    }

    // Total number of checkpoints in header file.
    static int & totalCheckPointCount()
    {
        static int result = 0;
        return result;
    }

    // Total number of checkpoints found in binary files.
    static int & foundCheckPointCount()
    {
        static int result = 0;
        return result;
    }

    // Names of temporary files as returned by tempFileName.  This global
    // state is used to remove any existing such files after completing the
    // testsuite.
    static std::vector<std::string> & tempFileNames()
    {
        static std::vector<std::string> filenames;
        return filenames;
    }
};
// Open a temporary file, unlink it, return posix handle. Note: This has not been tested yet.
// TODO(holtgrew): Not used yet and Windows code does not work.
/*
inline
int openTempFile() {
#ifdef PLATFORM_WINDOWS
char * fileName = _tempnam(NULL, "SQN");
if (!fileName) {
::std::cerr << "Cannot create a unique temporary filename" << ::std::endl;
exit(1);
}
int result = open(fileName, _O_RDWR | OPEN_TEMPORARY);
free(fileName);
return result;
#else // A Unix...
char filenameBuffer[100];
strcpy(filenameBuffer, "/tmp/SEQANXXXXXXXXXX");
int result = mkstemp(filenameBuffer);
unlink(filenameBuffer);
return result;
#endif // ifdef PLATFORM_WINDOWS
}
*/
// Return the path to a temporary file, in a static buffer in this
// function. This is not thread safe!
//
// On POSIX: mkstemp() creates a unique file, which is then unlinked and a
// DIRECTORY of the same name created; the returned path is a "test_file"
// inside that directory.  The directory name is recorded in
// StaticData::tempFileNames() so endTestSuite() can clean it up.
// Each call overwrites the same static buffer (and re-registers a new name).
inline
const char * tempFileName()
{
    //IOREV _duplicate_ overlaps with some stuff in system/file_sync.h, should be moved to io-module
    static char fileNameBuffer[1000];
#ifdef PLATFORM_WINDOWS_VS
    static char filePathBuffer[1000];
    //  Gets the temp path env string (no guarantee it's a valid path).
    DWORD dwRetVal = 0;
    dwRetVal = GetTempPath(1000,            // length of the buffer
                           filePathBuffer); // buffer for path
    if (dwRetVal > 1000 || (dwRetVal == 0))
    {
        std::cerr << "GetTempPath failed" << std::endl;
        exit(1);
    }

    UINT uRetVal   = 0;
    uRetVal = GetTempFileName(filePathBuffer,   // directory for tmp files
                              TEXT("SEQAN."),   // temp file name prefix
                              0,                // create unique name
                              fileNameBuffer);  // buffer for name

    if (uRetVal == 0)
    {
        std::cerr << "GetTempFileName failed" << std::endl;
        exit(1);
    }

    // Replace the created temp FILE with a DIRECTORY of the same name.
    DeleteFile(fileNameBuffer);
    CreateDirectoryA(fileNameBuffer, NULL);
    StaticData::tempFileNames().push_back(fileNameBuffer);
    strcat(fileNameBuffer, "\\test_file");
    return fileNameBuffer;

#else  // ifdef PLATFORM_WINDOWS_VS
    strcpy(fileNameBuffer, "/tmp/SEQAN.XXXXXXXXXXXXXXXXXXXX");
#ifdef PLATFORM_WINDOWS_MINGW
    // There is no mkstemp in MinGW but it does not complain about tmpnam.
    // NOTE(review): tmpnam is racy; acceptable only because MinGW offers no
    // better portable alternative here.
    tmpnam(fileNameBuffer);
#else  // ifdef PLATFORM_WINDOWS_MINGW
    int _tmp = mkstemp(fileNameBuffer);
    (void) _tmp;
    // Turn the unique name into a directory and register it for cleanup.
    unlink(fileNameBuffer);
    mkdir(fileNameBuffer, 0777);

    StaticData::tempFileNames().push_back(fileNameBuffer);

    strcat(fileNameBuffer, "/test_file");
#endif  // #ifdef PLATFORM_WINDOWS_MINGW
    return fileNameBuffer;

#endif  // ifdef PLATFORM_WINDOWS_VS
}
// Initialize the testing infrastructure.
//
// Used through SEQAN_BEGIN_TESTSUITE(test_name)
//
// Prints the suite banner and debug level, resets the global counters, and
// extracts the directory part of argv0 into StaticData::basePath().
inline
void beginTestSuite(const char * testSuiteName, const char * argv0)
{
    // First things first: Print test suite name and current debug level.
    std::cout << "TEST SUITE " << testSuiteName << std::endl;
    printDebugLevel(std::cout);
    (void)testSuiteName;
    StaticData::testCount() = 0;
    StaticData::skippedCount() = 0;
    StaticData::errorCount() = 0;
    StaticData::totalCheckPointCount() = 0;
    StaticData::foundCheckPointCount() = 0;
    // Find the LAST path separator in argv0.  On Windows, both \ and / can
    // occur, so take whichever comes first at each step -- but ignore a
    // NULL result from strchr.  (The original std::min(strchr(.., '\\'),
    // strchr(.., '/')) returned NULL whenever one separator kind was
    // absent, so plain Unix paths always collapsed to ".".)
    const char * end = argv0;
    const char * ptr = argv0;
    while (true)
    {
        const char * bs = strchr(ptr, '\\');
        const char * fs = strchr(ptr, '/');
        const char * sep = (bs && fs) ? std::min(bs, fs) : (bs ? bs : fs);
        if (sep == 0)
            break;
        end = sep;
        ptr = sep + 1;
    }
    int rpos = end - argv0;
    if (rpos <= 0)
    {
        StaticData::basePath() = new char[2];
        strcpy(StaticData::basePath(), ".");
    }
    else
    {
        // Copy the directory prefix (without the trailing separator).  The
        // original allocated new char[len] and used strncpy(.., len), which
        // left the string without a terminating NUL; allocate one extra
        // byte and terminate explicitly.
        int len = rpos;
        StaticData::basePath() = new char[len + 1];
        strncpy(StaticData::basePath(), argv0, len);
        StaticData::basePath()[len] = '\0';
    }

#ifdef PLATFORM_WINDOWS_VS
    // Set CRT reporting such that everything goes to stderr and there are
    // no popups causing timeouts.
    _set_error_mode(_OUT_TO_STDERR);
    _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE);
    _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR);
    _CrtSetReportMode(_CRT_ERROR, _CRTDBG_MODE_FILE);
    _CrtSetReportFile(_CRT_ERROR, _CRTDBG_FILE_STDERR);
    _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_FILE);
    _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR);
#endif  // PLATFORM_WINDOWS_VS
}
// Run test suite finalization.
//
// Used through SEQAN_END_TESTSUITE
//
// Prints a bottom banner with the error count, removes all temporary
// directories registered by tempFileName(), and returns the program's
// return code (0 on success, 1 if any errors occurred).
inline
int endTestSuite()
{
    // Free the buffer allocated in beginTestSuite().
    // NOTE(review): if beginTestSuite() was never called, basePath() still
    // holds its default value -- confirm callers always pair begin/end.
    delete[] StaticData::basePath();

    std::cout << "**************************************" << std::endl;
    std::cout << " Total Check Points : " << StaticData::totalCheckPointCount() << std::endl;
    std::cout << " Found Check Points : " << StaticData::foundCheckPointCount() << std::endl;
    std::cout << " Lost Check Points  : " << StaticData::totalCheckPointCount() - StaticData::foundCheckPointCount() << std::endl;
    std::cout << "--------------------------------------" << std::endl;
    std::cout << " Total Tests: " << StaticData::testCount() << std::endl;
    std::cout << " Skipped:     " << StaticData::skippedCount() << std::endl;
    std::cout << " Errors:      " << StaticData::errorCount() << std::endl;
    std::cout << "**************************************" << std::endl;
    // TODO(holtgrew): Re-enable that all check points have to be found for the test to return 1;
    /*
    if (StaticData::totalCheckPointCount() != StaticData::foundCheckPointCount())
        return 1;
    */
    // Delete all temporary files that still exist.  Each entry is a
    // directory created by tempFileName(); remove its contents first, then
    // the directory itself (platform-specific directory iteration).
    for (unsigned i = 0; i < StaticData::tempFileNames().size(); ++i)
    {
#ifdef PLATFORM_WINDOWS
        HANDLE hFind;
        WIN32_FIND_DATA data;

        std::string temp = StaticData::tempFileNames()[i].c_str() + std::string("\\*");
        hFind = FindFirstFile(temp.c_str(), &data);
        if (hFind != INVALID_HANDLE_VALUE)
        {
            do
            {
                std::string tempp = StaticData::tempFileNames()[i].c_str() + std::string("\\") + data.cFileName;
                DeleteFile(tempp.c_str());
            }
            while (FindNextFile(hFind, &data));
            FindClose(hFind);
        }

        RemoveDirectory(StaticData::tempFileNames()[i].c_str());
#else  // #ifdef PLATFORM_WINDOWS
        DIR * dpdf;
        struct dirent * epdf;

        dpdf = opendir(StaticData::tempFileNames()[i].c_str());
        if (dpdf != NULL)
        {
            // NOTE(review): this also attempts to unlink "." and ".." --
            // those unlink() calls fail harmlessly, but the errors are
            // silently ignored, as is a missing closedir().
            while ((epdf = readdir(dpdf)) != NULL)
            {
                std::string temp = StaticData::tempFileNames()[i].c_str() + std::string("/") + std::string(epdf->d_name);
                unlink(temp.c_str());
            }
        }

        rmdir(StaticData::tempFileNames()[i].c_str());
#endif  // #ifdef PLATFORM_WINDOWS
    }

    if (StaticData::errorCount() != 0)
        return 1;

    return 0;
}
// Run test initialization: remember the test's name, reset the per-test
// status flags and count the test.
inline
void beginTest(const char * testName)
{
    StaticData::thisTestOk() = true;
    StaticData::thisTestSkipped() = false;
    StaticData::currentTestName() = testName;
    ++StaticData::testCount();
}
// Run test finalization: report the outcome of the current test.
// SKIPPED and OK go to stdout; FAILED goes to stderr.
inline
void endTest()
{
    const char * name = StaticData::currentTestName();
    if (StaticData::thisTestSkipped())
        std::cout << name << " SKIPPED" << std::endl;
    else if (StaticData::thisTestOk())
        std::cout << name << " OK" << std::endl;
    else
        std::cerr << name << " FAILED" << std::endl;
}
// Marks the current test as "skipped" and counts it among the skipped tests.
inline
void skipCurrentTest()
{
    ++StaticData::skippedCount();
    StaticData::thisTestSkipped() = true;
}
// Called by the macro SEQAN_ASSERT_FAIL.
//
// Forward declaration of the va_list-taking variant (defined below) so the
// variadic front end can delegate to it instead of duplicating the
// reporting code.
inline void vforceFail(const char * file, int line,
                       const char * comment, va_list argp);

// Report an unconditional failure at file:line with an optional
// printf-style comment (may be 0), and bump the global error count.
inline void forceFail(const char * file, int line,
                      const char * comment, ...)
{
    va_list args;
    va_start(args, comment);
    vforceFail(file, line, comment, args);
    va_end(args);
}
// Similar to forceFail above, but accepting a va_list parameter.
//
// Reports a failure at file:line; `comment` is a printf-style format string
// applied to argp (may be 0 for no message).  Bumps the global error count.
inline void vforceFail(const char * file, int line,
                       const char * comment, va_list argp)
{
    StaticData::errorCount() += 1;

    std::cerr << file << ":" << line << " FAILED! ";
    if (comment)
    {
        std::cerr << " (";
        // Mixes iostreams with vfprintf(stderr, ...); both target stderr,
        // and C++ streams are synchronized with stdio by default.
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
}
// Same as forceFail above, but with comment set to 0 (no message).
inline void forceFail(const char * file, int line)
{
    forceFail(file, line, 0);
}
// Called by the macro SEQAN_ASSERT_EQ.
//
// Tests that the given two values are equal.  Returns true iff the
// two values are equal; on failure records the error in the global
// counters and prints a diagnostic with the optional printf-style comment.
//
// Forward declaration of the va_list-based variant (defined below); the
// variadic front end delegates to it to avoid duplicating the logic.
template <typename T1, typename T2>
bool vtestEqual(const char * file, int line,
                const T1 & value1, const char * expression1,
                const T2 & value2, const char * expression2,
                const char * comment, va_list argp);

template <typename T1, typename T2>
bool testEqual(char const * file, int line,
               T1 const & value1, char const * expression1,
               T2 const & value2, char const * expression2,
               char const * comment, ...)
{
    va_list args;
    va_start(args, comment);
    bool res = vtestEqual(file, line, value1, expression1,
                          value2, expression2, comment, args);
    va_end(args);
    return res;
}
// Similar to testEqual above, but accepts a va_list instead of variadic
// parameters.
//
// Returns true iff value1 == value2.  On failure, marks the current test as
// failed, bumps the global error count, and prints a diagnostic to stderr
// (the optional printf-style `comment` is formatted from argp; may be 0).
template <typename T1, typename T2>
bool vtestEqual(const char * file, int line,
                const T1 & value1, const char * expression1,
                const T2 & value2, const char * expression2,
                const char * comment, va_list argp)
{
    if (!(value1 == value2))
    {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " == " << expression2 << " was: " << value1
                  << " != " << value2;
        if (comment)
        {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}
// Same as testEqual above, but with comment set to 0 (no message).
template <typename T1, typename T2>
bool testEqual(const char * file, int line,
               const T1 & value1, const char * expression1,
               const T2 & value2, const char * expression2)
{
    return testEqual(file, line, value1, expression1, value2, expression2, 0);
}
// Called by the macro SEQAN_ASSERT_IN_DELTA.
//
// Tests that value1 lies within [value2 - value3, value2 + value3].
// Returns true iff the check passes; on failure records the error in the
// global counters and prints a diagnostic with the optional comment.
//
// Forward declaration of the va_list-based variant (defined below); the
// variadic front end delegates to it to avoid duplicating the logic.
template <typename T1, typename T2, typename T3>
bool vtestInDelta(const char * file, int line,
                  const T1 & value1, const char * expression1,
                  const T2 & value2, const char * expression2,
                  const T3 & value3, const char * expression3,
                  const char * comment, va_list argp);

template <typename T1, typename T2, typename T3>
bool testInDelta(const char * file, int line,
                 const T1 & value1, const char * expression1,
                 const T2 & value2, const char * expression2,
                 const T3 & value3, const char * expression3,
                 const char * comment, ...)
{
    va_list args;
    va_start(args, comment);
    bool res = vtestInDelta(file, line, value1, expression1,
                            value2, expression2, value3, expression3,
                            comment, args);
    va_end(args);
    return res;
}
// Similar to testInDelta above, but accepts a va_list instead of variadic
// parameters.
//
// Returns true iff value1 is within [value2 - value3, value2 + value3].
// On failure, marks the current test as failed, bumps the global error
// count, and prints a diagnostic to stderr (optional printf-style comment
// formatted from argp; may be 0).
template <typename T1, typename T2, typename T3>
bool vtestInDelta(const char * file, int line,
                  const T1 & value1, const char * expression1,
                  const T2 & value2, const char * expression2,
                  const T3 & value3, const char * expression3,
                  const char * comment, va_list argp)
{
    if (!(value1 >= value2 - value3 && value1 <= value2 + value3))
    {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " in [" << expression2 << " - " << expression3
                  << ", " << expression2 << " + " << expression3 << "] was: " << value1
                  << " not in [" << value2 - value3 << ", " << value2 + value3 << "]";
        if (comment)
        {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}
// Same as testInDelta above, but with comment set to 0 (no message).
template <typename T1, typename T2, typename T3>
bool testInDelta(const char * file, int line,
                 const T1 & value1, const char * expression1,
                 const T2 & value2, const char * expression2,
                 const T3 & value3, const char * expression3)
{
    return testInDelta(file, line, value1, expression1, value2, expression2, value3, expression3, 0);
}
// Called by the macro SEQAN_ASSERT_NEQ.
//
// Tests that the given two values are not equal.  Returns true iff
// the two values are NOT equal; on failure records the error and
// prints a diagnostic with an optional printf-style comment.
template <typename T1, typename T2>
bool testNotEqual(const char * file, int line,
                  const T1 & value1, const char * expression1,
                  const T2 & value2, const char * expression2,
                  const char * comment, ...)
{
    if (value1 != value2)
        return true;  // Assertion holds.

    // Record the failure in the global test bookkeeping.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    // Emit the diagnostic line.
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " != " << expression2
              << " was: " << value1 << " == " << value2;
    if (comment)
    {
        // Append the caller-supplied printf-style comment.
        std::cerr << " (";
        va_list ap;
        va_start(ap, comment);
        vfprintf(stderr, comment, ap);
        va_end(ap);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Similar to testNotEqual above, but accepts a va_list instead of variadic
// parameters.
//
// Returns true iff value1 != value2; on failure records the error and
// prints a diagnostic, formatting the optional comment from argp.
template <typename T1, typename T2>
bool vtestNotEqual(const char * file, int line,
                   const T1 & value1, const char * expression1,
                   const T2 & value2, const char * expression2,
                   const char * comment, va_list argp)
{
    if (value1 != value2)
        return true;  // Assertion holds.

    // Record the failure in the global test bookkeeping.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    // Emit the diagnostic line.
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " != " << expression2
              << " was: " << value1 << " == " << value2;
    if (comment)
    {
        // Append the caller-supplied printf-style comment.
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Same as testNotEqual above, but with comment set to 0.
template <typename T1, typename T2>
bool testNotEqual(const char * file, int line,
                  const T1 & value1, const char * expression1,
                  const T2 & value2, const char * expression2)
{
    // Delegate to the variadic overload, passing a null comment.
    return testNotEqual(file, line,
                        value1, expression1,
                        value2, expression2,
                        static_cast<const char *>(0));
}
// Called by the macro SEQAN_ASSERT_GEQ.
//
// Tests that the first value is greater than or equal to the
// second one.  Returns true iff the test yields true; on failure
// records the error and prints a diagnostic with an optional
// printf-style comment.
template <typename T1, typename T2>
bool testGeq(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2,
             const char * comment, ...)
{
    if (value1 >= value2)
        return true;  // Assertion holds.

    // Record the failure in the global test bookkeeping.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    // Emit the diagnostic line.
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " >= " << expression2
              << " was: " << value1 << " < " << value2;
    if (comment)
    {
        // Append the caller-supplied printf-style comment.
        std::cerr << " (";
        va_list ap;
        va_start(ap, comment);
        vfprintf(stderr, comment, ap);
        va_end(ap);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Similar to testGeq above, but accepts a va_list instead of variadic
// parameters.
//
// Returns true iff value1 >= value2; on failure records the error and
// prints a diagnostic, formatting the optional comment from argp.
template <typename T1, typename T2>
bool vtestGeq(const char * file, int line,
              const T1 & value1, const char * expression1,
              const T2 & value2, const char * expression2,
              const char * comment, va_list argp)
{
    if (value1 >= value2)
        return true;  // Assertion holds.

    // Record the failure in the global test bookkeeping.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    // Emit the diagnostic line.
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " >= " << expression2
              << " was: " << value1 << " < " << value2;
    if (comment)
    {
        // Append the caller-supplied printf-style comment.
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Same as testGeq above, but with comment set to 0.
template <typename T1, typename T2>
bool testGeq(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2)
{
    // Delegate to the variadic overload, passing a null comment.
    return testGeq(file, line,
                   value1, expression1,
                   value2, expression2,
                   static_cast<const char *>(0));
}
// Called by the macro SEQAN_ASSERT_GT.
//
// Tests that the first value is greater than the second one.
// Returns true iff the test yields true; on failure records the
// error and prints a diagnostic with an optional printf-style
// comment.
template <typename T1, typename T2>
bool testGt(const char * file, int line,
            const T1 & value1, const char * expression1,
            const T2 & value2, const char * expression2,
            const char * comment, ...)
{
    if (value1 > value2)
        return true;  // Assertion holds.

    // Record the failure in the global test bookkeeping.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    // Emit the diagnostic line.
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " > " << expression2
              << " was: " << value1 << " <= " << value2;
    if (comment)
    {
        // Append the caller-supplied printf-style comment.
        std::cerr << " (";
        va_list ap;
        va_start(ap, comment);
        vfprintf(stderr, comment, ap);
        va_end(ap);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Similar to testGt above, but accepts a va_list instead of variadic
// parameters.
//
// Returns true iff value1 > value2; on failure records the error and
// prints a diagnostic, formatting the optional comment from argp.
template <typename T1, typename T2>
bool vtestGt(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2,
             const char * comment, va_list argp)
{
    if (value1 > value2)
        return true;  // Assertion holds.

    // Record the failure in the global test bookkeeping.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    // Emit the diagnostic line.
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " > " << expression2
              << " was: " << value1 << " <= " << value2;
    if (comment)
    {
        // Append the caller-supplied printf-style comment.
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Same as testGt above, but with comment set to 0.
template <typename T1, typename T2>
bool testGt(const char * file, int line,
            const T1 & value1, const char * expression1,
            const T2 & value2, const char * expression2)
{
    // Delegate to the variadic overload, passing a null comment.
    return testGt(file, line,
                  value1, expression1,
                  value2, expression2,
                  static_cast<const char *>(0));
}
// Called by the macro SEQAN_ASSERT_LEQ.
//
// Tests that the first value is less than or equal to the second
// one.  Returns true iff the test yields true; on failure records
// the error and prints a diagnostic with an optional printf-style
// comment.
template <typename T1, typename T2>
bool testLeq(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2,
             const char * comment, ...)
{
    if (value1 <= value2)
        return true;  // Assertion holds.

    // Record the failure in the global test bookkeeping.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    // Emit the diagnostic line.
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " <= " << expression2
              << " was: " << value1 << " > " << value2;
    if (comment)
    {
        // Append the caller-supplied printf-style comment.
        std::cerr << " (";
        va_list ap;
        va_start(ap, comment);
        vfprintf(stderr, comment, ap);
        va_end(ap);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Similar to testLeq above, but accepts a va_list instead of variadic
// parameters.
//
// Returns true iff value1 <= value2; on failure records the error and
// prints a diagnostic, formatting the optional comment from argp.
template <typename T1, typename T2>
bool vtestLeq(const char * file, int line,
              const T1 & value1, const char * expression1,
              const T2 & value2, const char * expression2,
              const char * comment, va_list argp)
{
    if (value1 <= value2)
        return true;  // Assertion holds.

    // Record the failure in the global test bookkeeping.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    // Emit the diagnostic line.
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " <= " << expression2
              << " was: " << value1 << " > " << value2;
    if (comment)
    {
        // Append the caller-supplied printf-style comment.
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Same as testLeq above, but with comment set to 0.
template <typename T1, typename T2>
bool testLeq(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2)
{
    // Delegate to the variadic overload, passing a null comment.
    return testLeq(file, line,
                   value1, expression1,
                   value2, expression2,
                   static_cast<const char *>(0));
}
// Called by the macro SEQAN_ASSERT_LT.
//
// Tests that the first value is less than the second one.
// Returns true iff the test yields true; on failure records the
// error and prints a diagnostic with an optional printf-style
// comment.
template <typename T1, typename T2>
bool testLt(const char * file, int line,
            const T1 & value1, const char * expression1,
            const T2 & value2, const char * expression2,
            const char * comment, ...)
{
    if (value1 < value2)
        return true;  // Assertion holds.

    // Record the failure in the global test bookkeeping.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    // Emit the diagnostic line.
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " < " << expression2
              << " was: " << value1 << " >= " << value2;
    if (comment)
    {
        // Append the caller-supplied printf-style comment.
        std::cerr << " (";
        va_list ap;
        va_start(ap, comment);
        vfprintf(stderr, comment, ap);
        va_end(ap);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Similar to testLt above, but accepts a va_list instead of variadic
// parameters.
//
// Returns true iff value1 < value2; on failure records the error and
// prints a diagnostic, formatting the optional comment from argp.
template <typename T1, typename T2>
bool vtestLt(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2,
             const char * comment, va_list argp)
{
    if (value1 < value2)
        return true;  // Assertion holds.

    // Record the failure in the global test bookkeeping.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    // Emit the diagnostic line.
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " < " << expression2
              << " was: " << value1 << " >= " << value2;
    if (comment)
    {
        // Append the caller-supplied printf-style comment.
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Same as testLt above, but comment is 0.
template <typename T1, typename T2>
bool testLt(const char * file, int line,
            const T1 & value1, const char * expression1,
            const T2 & value2, const char * expression2)
{
    // Delegate to the variadic overload, passing a null comment.
    return testLt(file, line,
                  value1, expression1,
                  value2, expression2,
                  static_cast<const char *>(0));
}
// Called by the macro SEQAN_ASSERT.
//
// Test that the given argument evaluates to true.  Returns true iff
// it does; on failure records the error and prints a diagnostic with
// an optional printf-style comment.
template <typename T>
bool testTrue(const char * file, int line,
              const T & value_, const char * expression_,
              const char * comment, ...)
{
    if (value_)
        return true;  // Assertion holds.

    // Record the failure in the global test bookkeeping.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    // Emit the diagnostic line.
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression_ << " should be true but was " << (value_);
    if (comment)
    {
        // Append the caller-supplied printf-style comment.
        std::cerr << " (";
        va_list ap;
        va_start(ap, comment);
        vfprintf(stderr, comment, ap);
        va_end(ap);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Similar to testTrue above, but accepts a va_list instead of variadic
// parameters.
//
// Returns true iff value_ evaluates to true; on failure records the
// error and prints a diagnostic, formatting the optional comment
// from argp.
template <typename T>
bool vtestTrue(const char * file, int line,
               const T & value_, const char * expression_,
               const char * comment, va_list argp)
{
    if (value_)
        return true;  // Assertion holds.

    // Record the failure in the global test bookkeeping.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    // Emit the diagnostic line.
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression_ << " should be true but was " << (value_);
    if (comment)
    {
        // Append the caller-supplied printf-style comment.
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Same as testTrue above, but comment will automatically be set to 0.
template <typename T>
bool testTrue(const char * file, int line,
              const T & value_, const char * expression_)
{
    // Delegate to the variadic overload, passing a null comment.
    return testTrue(file, line, value_, expression_,
                    static_cast<const char *>(0));
}
// Called by the macro SEQAN_ASSERT_NOT.
//
// Test that the given argument evaluates to false.  Returns true iff
// it does; on failure records the error and prints a diagnostic with
// an optional printf-style comment.
template <typename T>
bool testFalse(const char * file, int line,
               const T & value_, const char * expression_,
               const char * comment, ...)
{
    if (value_)
    {
        // Record the failure in the global test bookkeeping.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Emit the diagnostic line.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression_ << " should be false but was " << (value_);
        if (comment)
        {
            // Append the caller-supplied printf-style comment.
            std::cerr << " (";
            va_list ap;
            va_start(ap, comment);
            vfprintf(stderr, comment, ap);
            va_end(ap);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}
// Similar to testFalse above, but accepts a va_list instead of variadic
// parameters.
//
// Returns true iff value_ evaluates to false; on failure records the
// error and prints a diagnostic, formatting the optional comment
// from argp.
template <typename T>
bool vtestFalse(const char * file, int line,
                const T & value_, const char * expression_,
                const char * comment, va_list argp)
{
    if (value_)
    {
        // Record the failure in the global test bookkeeping.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Emit the diagnostic line.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression_ << " should be false but was " << (value_);
        if (comment)
        {
            // Append the caller-supplied printf-style comment.
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}
// Same as testFalse above, but comment will automatically be set to 0.
template <typename T>
bool testFalse(const char * file, int line,
               const T & value_, const char * expression_)
{
    // Delegate to the variadic overload, passing a null comment.
    return testFalse(file, line, value_, expression_,
                     static_cast<const char *>(0));
}
// Represents a check point in a file.
struct CheckPoint
{
    // Path to the file.
    const char * file;
    // Line in the file.
    unsigned int line;

    // Lexicographic ordering: first by file name (strcmp), then by
    // line number.  Needed so CheckPoint can be stored in a std::set.
    bool operator<(const CheckPoint & other) const
    {
        int cmp = strcmp(file, other.file);
        return cmp < 0 || (cmp == 0 && line < other.line);
    }
};
// Wrapper for a set of check points.
// TODO(holtgrew): Simply store the set?
struct CheckPointStore
{
    // Access to the single, function-local static set of registered
    // check points (constructed on first use).
    static ::std::set<CheckPoint> & data()
    {
        static ::std::set<CheckPoint> store;
        return store;
    }
};
// Puts the given check point into the CheckPointStore's data.
//
// Reduces `file` to its base name (handling both '/' and '\\'
// separators) so check points are keyed by base name, then inserts
// the (file, line) pair into the global set.  Always returns true so
// the call can be used in an expression context.
inline bool
registerCheckPoint(unsigned int line, const char * file)
{
    // Find the last path separator of either flavor; the one that
    // occurs later in the string wins.
    // NOTE(review): when exactly one of the two results is NULL this
    // compares NULL against a valid pointer with '>', which is not
    // well-defined by the standard; it works on common platforms
    // where a null pointer compares lowest -- confirm intent.
    const char * file_name = strrchr(file, '/');
    const char * file_name_2 = strrchr(file, '\\');
    if (file_name_2 > file_name)
        file_name = file_name_2;
    if (!file_name)
        file_name = file;
    else
        ++file_name;
    CheckPoint cp = {file_name, line};
#ifdef _OMP
// NOTE(review): the OpenMP specification predefines _OPENMP, not
// _OMP -- confirm _OMP is supplied by the build, otherwise this
// critical section never guards the insert.
#pragma omp critical
#endif  // #ifdef _OMP
    CheckPointStore::data().insert(cp);
    return true;
}
// Test whether the given check point exists in the check point
// store.
inline void
testCheckPoint(const char * file, unsigned int line)
{
StaticData::totalCheckPointCount() += 1;
CheckPoint cp = {file, line};
if (CheckPointStore::data().find(cp) == CheckPointStore::data().end())
{
std::cerr << file << ":" << line << " -- Check point lost."
<< std::endl;
return;
}
StaticData::foundCheckPointCount() += 1;
}
// Verify the check points for the given file.
//
// Opens <pathToRoot>/<file>, scans it line by line, and runs a
// check-point test (testCheckPoint) for every line containing the
// string "SEQAN_CHECKPOINT".  If the file cannot be opened an error
// is printed and the function returns without scanning.
inline void
verifyCheckPoints(const char * file)
{
    // Reduce the path to its base name, handling both separators.
    char const * file_name = strrchr(file, '/');
    char const * file_name_2 = strrchr(file, '\\');
    if (file_name_2 > file_name)
        file_name = file_name_2;
    if (!file_name)
        file_name = file;
    else
        ++file_name;
    // Build the absolute path "<pathToRoot>/<file>".
    int len = strlen(StaticData::pathToRoot()) +
              strlen("/") + strlen(file) + 1;
    char * absolutePath = new char[len];
    absolutePath[0] = '\0';
    strcat(absolutePath, StaticData::pathToRoot());
    strcat(absolutePath, "/");
    strcat(absolutePath, file);
    FILE * fl = ::std::fopen(absolutePath, "r");
    delete[] absolutePath;
    if (!fl)
    {
        std::cerr << file << " -- verifyCheckPoints could not find this file." << std::endl;
        // BUG FIX: the original fell through and called fgets() on a
        // NULL stream, which is undefined behavior.  Bail out.
        return;
    }
    unsigned int line_number = 1;
    char buf[1 << 16];
    while (::std::fgets(buf, sizeof(buf), fl))
    {
        if (::std::strstr(buf, "SEQAN_CHECKPOINT"))
        {
            testCheckPoint(file_name, line_number);
        }
        ++line_number;
    }
    ::std::fclose(fl);
}
#if SEQAN_ENABLE_TESTING
// If in testing mode then raise an AssertionFailedException.
//
// Marks the current test as failed, prints a stack trace (up to 20
// frames) and throws, so the test driver can continue with the next
// test (see the catch in SEQAN_CALL_TEST).
inline void fail()
{
    StaticData::thisTestOk() = false;
    printStackTrace(20);
    throw AssertionFailedException();
}
#else
// If not in testing mode then quit with an abort.
//
// Prints a stack trace (up to 20 frames) and terminates the program.
inline void fail()
{
    printStackTrace(20);
    abort();
}
#endif  // #if SEQAN_ENABLE_TESTING
} // namespace ClassTest
/**
.Macro.SEQAN_DEFINE_TEST
..summary:Expand to test definition.
..cat:Testing & Debugging
..signature:SEQAN_DEFINE_TEST(test_name)
..param.test_name:The name of the test.
..remarks:This macro expands to the definition of a $void$ function with $SEQAN_TEST_ + test_name$ as its name.
..example.code:
SEQAN_DEFINE_TEST(test_name)
{
SEQAN_ASSERT_LT(0, 3);
}
..see:Macro.SEQAN_SKIP_TEST
..see:Macro.SEQAN_CALL_TEST
..see:Macro.SEQAN_BEGIN_TESTSUITE
..see:Macro.SEQAN_END_TESTSUITE
*/
// This macro expands to the function header for one test.
//
// The dummy template parameter (per its own name) keeps unused tests
// from being compiled unless instantiated via SEQAN_CALL_TEST.
#define SEQAN_DEFINE_TEST(test_name) \
    template <bool speed_up_dummy_to_prevent_compilation_of_unused_tests_> \
    void SEQAN_TEST_ ## test_name()
/**
.Macro.SEQAN_BEGIN_TESTSUITE
..summary:Expand to a test suite beginning.
..cat:Testing & Debugging
..signature:SEQAN_BEGIN_TESTSUITE(name)
..param.name:The name of the test suite.
..remarks:This macro expands to a $main()$ function and some initialization code that sets up the test system.
..example.code:
#include <seqan/basic.h>
SEQAN_BEGIN_TESTSUITE(test_foo)
{
SEQAN_CALL_TEST(test_foo_my_test);
}
SEQAN_END_TESTSUITE
..see:Macro.SEQAN_SKIP_TEST
..see:Macro.SEQAN_DEFINE_TEST
..see:Macro.SEQAN_CALL_TEST
..see:Macro.SEQAN_END_TESTSUITE
*/
#if SEQAN_ENABLE_TESTING
// This macro expands to startup code for a test file.
//
// It opens main() and initializes the test system via
// beginTestSuite(); the matching SEQAN_END_TESTSUITE closes main().
#define SEQAN_BEGIN_TESTSUITE(suite_name) \
    int main(int argc, char ** argv) { \
    (void) argc; \
    ::seqan::ClassTest::beginTestSuite(# suite_name, argv[0]);
/**
.Macro.SEQAN_END_TESTSUITE
..summary:Expand to a test suite ending.
..cat:Testing & Debugging
..signature:SEQAN_END_TESTSUITE
..remarks:This macro expands to finalization code for a test suite.
..example.code:
#include <seqan/basic.h>
SEQAN_BEGIN_TESTSUITE(test_foo)
{
SEQAN_CALL_TEST(test_foo_my_test);
}
SEQAN_END_TESTSUITE
..see:Macro.SEQAN_SKIP_TEST
..see:Macro.SEQAN_DEFINE_TEST
..see:Macro.SEQAN_CALL_TEST
..see:Macro.SEQAN_BEGIN_TESTSUITE
*/
// This macro expands to shutdown code for a test file.
//
// It returns the suite result from endTestSuite() and closes the
// main() function opened by SEQAN_BEGIN_TESTSUITE.
#define SEQAN_END_TESTSUITE \
    return ::seqan::ClassTest::endTestSuite(); \
    }
/**
.Macro.SEQAN_CALL_TEST
..summary:Expand to calling a test.
..cat:Testing & Debugging
..signature:SEQAN_CALL_TEST(test_name)
..param.test_name:The name of the test.
..remarks:This expects the test to be defined with @Macro.SEQAN_DEFINE_TEST@. This macro will expand to code that calls the code inside a try/catch block. Use this macro within a test suite, only.
..example.code:
// Within a test suite.
SEQAN_CALL_TEST(test_name);
..see:Macro.SEQAN_SKIP_TEST
..see:Macro.SEQAN_DEFINE_TEST
..see:Macro.SEQAN_BEGIN_TESTSUITE
..see:Macro.SEQAN_END_TESTSUITE
*/
// This macro expands to code to call a given test.
//
// Runs the test inside a try/catch so that an assertion failure
// (AssertionFailedException thrown by fail()) aborts only the
// current test, not the whole suite.
#define SEQAN_CALL_TEST(test_name) \
    do { \
        ::seqan::ClassTest::beginTest(# test_name); \
        try { \
            SEQAN_TEST_ ## test_name<true>(); \
        } catch (::seqan::ClassTest::AssertionFailedException const & e) { \
            /* FIX: catch by const reference instead of by value to */ \
            /* avoid an unnecessary copy (CERT ERR61-CPP). */ \
            /* Swallow exception, go on with next test. */ \
            (void) e; /* Get rid of unused variable warning. */ \
        } \
        ::seqan::ClassTest::endTest(); \
    } while (false)
/**
.Macro.SEQAN_SKIP_TEST
..cat:Testing & Debugging
..summary:Force the test to return without failing and mark it as skipped.
..signature:SEQAN_SKIP_TEST
..example.code:
SEQAN_DEFINE_TEST(test_skipped)
{
SEQAN_SKIP_TEST;
}
..see:Macro.SEQAN_DEFINE_TEST
..see:Macro.SEQAN_CALL_TEST
..see:Macro.SEQAN_BEGIN_TESTSUITE
..see:Macro.SEQAN_END_TESTSUITE
*/
// This macro returns from the current function and logs a "skipped"
// event for the current test.
//
// Use inside a function defined with SEQAN_DEFINE_TEST only.
#define SEQAN_SKIP_TEST \
    do { \
    ::seqan::ClassTest::skipCurrentTest(); \
    return; \
    } while (false)
#endif  // #if SEQAN_ENABLE_TESTING
// variadic macros are not supported by VS 2003 and before
#if !defined(_MSC_VER) || (_MSC_VER >= 1400)
#if SEQAN_ENABLE_DEBUG
/**
.Macro.SEQAN_ASSERT
..cat:Assertions
..summary:Test that the given expression can be coerced to $true$.
..signature:SEQAN_ASSERT(expression)
..signature:SEQAN_ASSERT_MSG(expression, message[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT(0); // will fail
SEQAN_ASSERT(1); // will run through
SEQAN_ASSERT_MSG(0, "message %d", 2); // Will fail with message.
..see:Macro.SEQAN_ASSERT_NOT
..see:Macro.SEQAN_ASSERT_EQ
..see:Macro.SEQAN_ASSERT_NEQ
..see:Macro.SEQAN_ASSERT_LEQ
..see:Macro.SEQAN_ASSERT_GEQ
..see:Macro.SEQAN_ASSERT_LT
..see:Macro.SEQAN_ASSERT_GT
..see:Macro.SEQAN_ASSERT_IN_DELTA
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
.Macro.SEQAN_ASSERT_NOT
..cat:Assertions
..summary:Test that the given expression can be coerced to $false$.
..signature:SEQAN_ASSERT_NOT(expression)
..signature:SEQAN_ASSERT_NOT_MSG(expression, message[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT_NOT(0); // will run through
SEQAN_ASSERT_NOT(1); // will fail
SEQAN_ASSERT_NOT_MSG(0, "msg %s", "test"); // will fail with message
..see:Macro.SEQAN_ASSERT
..see:Macro.SEQAN_ASSERT_EQ
..see:Macro.SEQAN_ASSERT_NEQ
..see:Macro.SEQAN_ASSERT_LEQ
..see:Macro.SEQAN_ASSERT_GEQ
..see:Macro.SEQAN_ASSERT_LT
..see:Macro.SEQAN_ASSERT_GT
..see:Macro.SEQAN_ASSERT_IN_DELTA
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
.Macro.SEQAN_ASSERT_EQ
..cat:Assertions
..summary:Test that two given expressions are equal, as defined by the matching call to the $operator=(,)$.
..signature:SEQAN_ASSERT_EQ(expression1, expression2)
..signature:SEQAN_ASSERT_EQ_MSG(expression1, expression2, comment[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT_EQ(0, false); // will run through
SEQAN_ASSERT_EQ(1, false); // will fail
SEQAN_ASSERT_EQ(1, "foo"); // will not compile
SEQAN_ASSERT_EQ_MSG(1, false, "msg"); // will fail with message
..see:Macro.SEQAN_ASSERT
..see:Macro.SEQAN_ASSERT_NOT
..see:Macro.SEQAN_ASSERT_NEQ
..see:Macro.SEQAN_ASSERT_LEQ
..see:Macro.SEQAN_ASSERT_GEQ
..see:Macro.SEQAN_ASSERT_LT
..see:Macro.SEQAN_ASSERT_GT
..see:Macro.SEQAN_ASSERT_IN_DELTA
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
.Macro.SEQAN_ASSERT_NEQ
..cat:Assertions
..summary:Test that two given expressions are not equal, as defined by the matching call to the $operator!=(,)$.
..signature:SEQAN_ASSERT_NEQ(expression1, expression2)
..signature:SEQAN_ASSERT_NEQ_MSG(expression1, expression2, message[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT_NEQ(0, false); // will fail
SEQAN_ASSERT_NEQ(1, false); // will run through
SEQAN_ASSERT_NEQ(1, "foo"); // will not compile
SEQAN_ASSERT_NEQ_MSG(1, false, "msg"); // will fail with message
..see:Macro.SEQAN_ASSERT
..see:Macro.SEQAN_ASSERT_NOT
..see:Macro.SEQAN_ASSERT_EQ
..see:Macro.SEQAN_ASSERT_LEQ
..see:Macro.SEQAN_ASSERT_GEQ
..see:Macro.SEQAN_ASSERT_LT
..see:Macro.SEQAN_ASSERT_GT
..see:Macro.SEQAN_ASSERT_IN_DELTA
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
.Macro.SEQAN_ASSERT_LT
..cat:Assertions
..summary:Test that the two given expressions are in the less-than relation as defined by the matching call to operator<(,).
..signature:SEQAN_ASSERT_LT(expression1, expression2)
..signature:SEQAN_ASSERT_LT_MSG(expression1, expression2, comment[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT_LT(0, 1); // will run through
SEQAN_ASSERT_LT(1, 1); // will not run through
SEQAN_ASSERT_LT_MSG(1, 1, "msg"); // will fail with message
..see:Macro.SEQAN_ASSERT
..see:Macro.SEQAN_ASSERT_NOT
..see:Macro.SEQAN_ASSERT_EQ
..see:Macro.SEQAN_ASSERT_NEQ
..see:Macro.SEQAN_ASSERT_LEQ
..see:Macro.SEQAN_ASSERT_GEQ
..see:Macro.SEQAN_ASSERT_GT
..see:Macro.SEQAN_ASSERT_IN_DELTA
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
.Macro.SEQAN_ASSERT_LEQ
..cat:Assertions
..summary:Test that the two given expressions are in the less-than-or-equal relation as defined by the matching call to operator<=(,).
..signature:SEQAN_ASSERT_LEQ(expression1, expression2)
..signature:SEQAN_ASSERT_LEQ_MSG(expression1, expression2, comment[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT_LEQ(1, 1); // will run through
SEQAN_ASSERT_LEQ(2, 1); // will not run through
SEQAN_ASSERT_LEQ_MSG(2, 1, "msg"); // will fail with message
..see:Macro.SEQAN_ASSERT
..see:Macro.SEQAN_ASSERT_NOT
..see:Macro.SEQAN_ASSERT_EQ
..see:Macro.SEQAN_ASSERT_NEQ
..see:Macro.SEQAN_ASSERT_GEQ
..see:Macro.SEQAN_ASSERT_LT
..see:Macro.SEQAN_ASSERT_GT
..see:Macro.SEQAN_ASSERT_IN_DELTA
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
.Macro.SEQAN_ASSERT_GT
..cat:Assertions
..summary:Test that the two given expressions are in the greater-than relation as defined by the matching call to operator>(,).
..signature:SEQAN_ASSERT_GT(expression1, expression2)
..signature:SEQAN_ASSERT_GT_MSG(expression1, expression2, comment[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT_GT(2, 1); // will run through
SEQAN_ASSERT_GT(1, 1); // will not run through
SEQAN_ASSERT_GT_MSG(1, 1, "msg"); // will fail with message
..see:Macro.SEQAN_ASSERT
..see:Macro.SEQAN_ASSERT_NOT
..see:Macro.SEQAN_ASSERT_EQ
..see:Macro.SEQAN_ASSERT_NEQ
..see:Macro.SEQAN_ASSERT_LEQ
..see:Macro.SEQAN_ASSERT_GEQ
..see:Macro.SEQAN_ASSERT_LT
..see:Macro.SEQAN_ASSERT_IN_DELTA
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
.Macro.SEQAN_ASSERT_GEQ
..cat:Assertions
..summary:Test that the two given expressions are in the greater-than-or-equal relation as defined by the matching call to operator>=(,).
..signature:SEQAN_ASSERT_GEQ(expression1, expression2)
..signature:SEQAN_ASSERT_GEQ_MSG(expression1, expression2, comment[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT_GEQ(1, 1); // will run through
SEQAN_ASSERT_GEQ(0, 1); // will not run through
SEQAN_ASSERT_GEQ_MSG(0, 1, "msg"); // will fail with message
..see:Macro.SEQAN_ASSERT
..see:Macro.SEQAN_ASSERT_NOT
..see:Macro.SEQAN_ASSERT_EQ
..see:Macro.SEQAN_ASSERT_NEQ
..see:Macro.SEQAN_ASSERT_LEQ
..see:Macro.SEQAN_ASSERT_LT
..see:Macro.SEQAN_ASSERT_GT
..see:Macro.SEQAN_ASSERT_IN_DELTA
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
.Macro.SEQAN_ASSERT_IN_DELTA
..cat:Assertions
..summary:Test that the value of $x$ lies within the interval $[y - delta, y + delta]$.
..signature:SEQAN_ASSERT_IN_DELTA(x, y, delta)
..signature:SEQAN_ASSERT_IN_DELTA_MSG(x, y, delta, comment[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT_IN_DELTA(0, 0, 0.1); // will run through
SEQAN_ASSERT_IN_DELTA(1, -2, 1); // will fail
SEQAN_ASSERT_IN_DELTA(1, "foo"); // will not compile
SEQAN_ASSERT_IN_DELTA_MSG(1, 0, 0.1, "msg"); // will fail with message
..see:Macro.SEQAN_ASSERT
..see:Macro.SEQAN_ASSERT_NOT
..see:Macro.SEQAN_ASSERT_EQ
..see:Macro.SEQAN_ASSERT_NEQ
..see:Macro.SEQAN_ASSERT_LEQ
..see:Macro.SEQAN_ASSERT_GEQ
..see:Macro.SEQAN_ASSERT_LT
..see:Macro.SEQAN_ASSERT_GT
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
*/
// Force a test failure unconditionally.
//
// The arguments form a printf-style message plus parameters.
//
// Usage: SEQAN_ASSERT_FAIL("Failed at position %d", pos);
#define SEQAN_ASSERT_FAIL(...) \
    do { \
    ::seqan::ClassTest::forceFail(__FILE__, __LINE__, \
    __VA_ARGS__); \
    ::seqan::ClassTest::fail(); \
    } while (false)
// Equality assertion without a comment.
//
// Calls fail() (throw or abort) when the two values are not equal.
//
// Usage: SEQAN_ASSERT_EQ(4, 4);
#define SEQAN_ASSERT_EQ(_arg1, _arg2) \
    do { \
    if (!::seqan::ClassTest::testEqual(__FILE__, __LINE__, \
    (_arg1), # _arg1, \
    (_arg2), # _arg2)) { \
    ::seqan::ClassTest::fail(); \
    } \
    } while (false)
// Equality assertion with a printf-style comment.
//
// Usage: SEQAN_ASSERT_EQ_MSG(4, 4, "expected %d", 4);
#define SEQAN_ASSERT_EQ_MSG(_arg1, _arg2, ...) \
    do { \
    if (!::seqan::ClassTest::testEqual(__FILE__, __LINE__, \
    (_arg1), # _arg1, \
    (_arg2), # _arg2, \
    __VA_ARGS__)) { \
    ::seqan::ClassTest::fail(); \
    } \
    } while (false)
// In-delta-environment assertion without a comment.
//
// Fails unless _arg1 lies within [_arg2 - _arg3, _arg2 + _arg3].
//
// Usage: SEQAN_ASSERT_IN_DELTA(4.1, 4, 0.1);
#define SEQAN_ASSERT_IN_DELTA(_arg1, _arg2, _arg3) \
    do { \
    if (!::seqan::ClassTest::testInDelta(__FILE__, __LINE__, \
    (_arg1), # _arg1, \
    (_arg2), # _arg2, \
    (_arg3), # _arg3)) { \
    ::seqan::ClassTest::fail(); \
    } \
    } while (false)
// In-delta-environment assertion with a printf-style comment.
//
// Usage: SEQAN_ASSERT_IN_DELTA_MSG(4.1, 4, 0.1, "3.9 <= 4.1 <= 4.1");
#define SEQAN_ASSERT_IN_DELTA_MSG(_arg1, _arg2, _arg3, ...) \
    do { \
    if (!::seqan::ClassTest::testInDelta(__FILE__, __LINE__, \
    (_arg1), # _arg1, \
    (_arg2), # _arg2, \
    (_arg3), # _arg3, \
    __VA_ARGS__)) { \
    ::seqan::ClassTest::fail(); \
    } \
    } while (false)
// Inequality assertion without a comment.
//
// Usage: SEQAN_ASSERT_NEQ(4, 5);
#define SEQAN_ASSERT_NEQ(_arg1, _arg2) \
    do { \
    if (!::seqan::ClassTest::testNotEqual(__FILE__, __LINE__, \
    (_arg1), # _arg1, \
    (_arg2), # _arg2)) { \
    ::seqan::ClassTest::fail(); \
    } \
    } while (false)
// Inequality assertion with a printf-style comment.
//
// Usage: SEQAN_ASSERT_NEQ_MSG(4, 5, "%d must differ from %d", 4, 5);
#define SEQAN_ASSERT_NEQ_MSG(_arg1, _arg2, ...) \
    do { \
    if (!::seqan::ClassTest::testNotEqual(__FILE__, __LINE__, \
    (_arg1), # _arg1, \
    (_arg2), # _arg2, \
    __VA_ARGS__)) { \
    ::seqan::ClassTest::fail(); \
    } \
    } while (false)
// Less-than-or-equal assertion without a comment.
//
// Usage: SEQAN_ASSERT_LEQ(1, 2);
#define SEQAN_ASSERT_LEQ(_arg1, _arg2) \
    do { \
    if (!::seqan::ClassTest::testLeq(__FILE__, __LINE__, \
    (_arg1), # _arg1, \
    (_arg2), # _arg2)) { \
    ::seqan::ClassTest::fail(); \
    } \
    } while (false)
// Less-than-or-equal assertion with a printf-style comment.
//
// Usage: SEQAN_ASSERT_LEQ_MSG(1, 2, "msg");
#define SEQAN_ASSERT_LEQ_MSG(_arg1, _arg2, ...) \
    do { \
    if (!::seqan::ClassTest::testLeq(__FILE__, __LINE__, \
    (_arg1), # _arg1, \
    (_arg2), # _arg2, \
    __VA_ARGS__)) { \
    ::seqan::ClassTest::fail(); \
    } \
    } while (false)
// Less-than assertion without a comment.
#define SEQAN_ASSERT_LT(_arg1, _arg2) \
do { \
if (!::seqan::ClassTest::testLt(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Less-than assertion with a comment.
#define SEQAN_ASSERT_LT_MSG(_arg1, _arg2, ...) \
do { \
if (!::seqan::ClassTest::testLt(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Greater-than-or-equal assertion without a comment.
#define SEQAN_ASSERT_GEQ(_arg1, _arg2) \
do { \
if (!::seqan::ClassTest::testGeq(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Greater-than-or-equal assertion with a comment.
#define SEQAN_ASSERT_GEQ_MSG(_arg1, _arg2, ...) \
do { \
if (!::seqan::ClassTest::testGeq(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Greater-than assertion without a comment.
#define SEQAN_ASSERT_GT(_arg1, _arg2) \
do { \
if (!::seqan::ClassTest::testGt(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Greater-than assertion with a comment.
#define SEQAN_ASSERT_GT_MSG(_arg1, _arg2, ...) \
do { \
if (!::seqan::ClassTest::testGt(__FILE__, __LINE__, \
(_arg1), # _arg1, \
(_arg2), # _arg2, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// TODO(holtgrew): Rename to SEQAN_ASSERT once that name is free.
// Trueness assertion without a comment.
//
// Usage: SEQAN_ASSERT(false);
#define SEQAN_ASSERT(_arg1) \
do { \
if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \
(_arg1), # _arg1)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// TODO(holtgrew): Rename to SEQAN_ASSERT once that name is free.;
// Trueness assertion with a comment.
#define SEQAN_ASSERT_MSG(_arg1, ...) \
do { \
if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \
(_arg1), # _arg1, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Falseness assertion without a comment.
//
// Usage: SEQAN_ASSERT_NOT(false);
#define SEQAN_ASSERT_NOT(_arg1) \
do { \
if (!::seqan::ClassTest::testFalse(__FILE__, __LINE__, \
(_arg1), # _arg1)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// Falseness assertion with a comment.
#define SEQAN_ASSERT_NOT_MSG(_arg1, ...) \
do { \
if (!::seqan::ClassTest::testFalse(__FILE__, __LINE__, \
(_arg1), # _arg1, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
#else // #if SEQAN_ENABLE_DEBUG
#define SEQAN_ASSERT_EQ(_arg1, _arg2) do {} while (false)
#define SEQAN_ASSERT_EQ_MSG(_arg1, _arg2, ...) do {} while (false)
#define SEQAN_ASSERT_NEQ(_arg1, _arg2) do {} while (false)
#define SEQAN_ASSERT_NEQ_MSG(_arg1, _arg2, ...) do {} while (false)
#define SEQAN_ASSERT_LEQ(_arg1, _arg2) do {} while (false)
#define SEQAN_ASSERT_LEQ_MSG(_arg1, _arg2, ...) do {} while (false)
#define SEQAN_ASSERT_LT(_arg1, _arg2) do {} while (false)
#define SEQAN_ASSERT_LT_MSG(_arg1, _arg2, ...) do {} while (false)
#define SEQAN_ASSERT_GEQ(_arg1, _arg2) do {} while (false)
#define SEQAN_ASSERT_GEQ_MSG(_arg1, _arg2, ...) do {} while (false)
#define SEQAN_ASSERT_GT(_arg1, _arg2) do {} while (false)
#define SEQAN_ASSERT_GT_MSG(_arg1, _arg2, ...) do {} while (false)
#define SEQAN_ASSERT(_arg1) do {} while (false)
#define SEQAN_ASSERT_MSG(_arg1, ...) do {} while (false)
#define SEQAN_ASSERT_NOT(_arg1) do {} while (false)
#define SEQAN_ASSERT_NOT_MSG(_arg1, ...) do {} while (false)
#define SEQAN_ASSERT_FAIL(...) do {} while (false)
#endif // #if SEQAN_ENABLE_DEBUG
#else // no variadic macros
#if SEQAN_ENABLE_DEBUG
// Unconditional-failure fallback used when variadic macros are unavailable:
// formats the printf-style message and marks the current test as failed.
// File/line information is unknown in the function form, hence ""/0.
inline void SEQAN_ASSERT_FAIL(const char * comment, ...)
{
    va_list args;
    va_start(args, comment);
    ::seqan::ClassTest::vforceFail("", 0, comment, args);
    ::seqan::ClassTest::fail();
    va_end(args);
}
// Fallback (no variadic macros): fail unless _arg1 lies within +/- _arg3 of _arg2.
template <typename T1, typename T2, typename T3>
void SEQAN_ASSERT_IN_DELTA(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3)
{
    if (!::seqan::ClassTest::testInDelta("", 0, _arg1, "", _arg2, "", _arg3, ""))
        ::seqan::ClassTest::fail();
}
// Same as above, with a printf-style message appended on failure.
template <typename T1, typename T2, typename T3>
void SEQAN_ASSERT_IN_DELTA_MSG(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3, const char * comment, ...)
{
    va_list args;
    va_start(args, comment);
    if (!::seqan::ClassTest::vtestInDelta("", 0, _arg1, "", _arg2, "", _arg3, "", comment, args))
        ::seqan::ClassTest::fail();
    va_end(args);
}
// Fallback equality assertion: fail unless _arg1 == _arg2.
template <typename T1, typename T2>
void SEQAN_ASSERT_EQ(T1 const & _arg1, T2 const & _arg2)
{
    if (!::seqan::ClassTest::testEqual("", 0, _arg1, "", _arg2, ""))
        ::seqan::ClassTest::fail();
}
// Fallback equality assertion with a printf-style message.
template <typename T1, typename T2>
void SEQAN_ASSERT_EQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{
    va_list args;
    va_start(args, comment);
    if (!::seqan::ClassTest::vtestEqual("", 0, _arg1, "", _arg2, "", comment, args))
        ::seqan::ClassTest::fail();
    va_end(args);
}
// Fallback inequality assertion: fail unless _arg1 != _arg2.
// FIX: pass the line-number argument (0) to testNotEqual(), matching the
// (file, line, value, expression, ...) signature used by every other
// fallback here and by the variadic-macro version above (which passes
// __FILE__, __LINE__); the original call omitted it.
template <typename T1, typename T2>
void SEQAN_ASSERT_NEQ(T1 const & _arg1, T2 const & _arg2)
{
    if (!::seqan::ClassTest::testNotEqual("", 0, _arg1, "", _arg2, ""))
        ::seqan::ClassTest::fail();
}
// Fallback inequality assertion with a printf-style message.
// FIX: pass the line-number argument (0) to vtestNotEqual(), consistent
// with vtestEqual/vtestLeq/etc. in the sibling fallbacks; the original
// call omitted it.
template <typename T1, typename T2>
void SEQAN_ASSERT_NEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{
    va_list args;
    va_start(args, comment);
    if (!::seqan::ClassTest::vtestNotEqual("", 0, _arg1, "", _arg2, "", comment, args))
        ::seqan::ClassTest::fail();
    va_end(args);
}
template <typename T1, typename T2>
void SEQAN_ASSERT_LEQ(T1 const & _arg1, T2 const & _arg2)
{
if (!::seqan::ClassTest::testLeq("", 0, _arg1, "", _arg2, ""))
::seqan::ClassTest::fail();
}
template <typename T1, typename T2>
void SEQAN_ASSERT_LEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{
va_list args;
va_start(args, comment);
if (!::seqan::ClassTest::vtestLeq("", 0, _arg1, "", _arg2, "", comment, args))
::seqan::ClassTest::fail();
va_end(args);
}
template <typename T1, typename T2>
void SEQAN_ASSERT_LT(T1 const & _arg1, T2 const & _arg2)
{
if (!::seqan::ClassTest::testLt("", 0, _arg1, "", _arg2, ""))
::seqan::ClassTest::fail();
}
template <typename T1, typename T2>
void SEQAN_ASSERT_LT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{
va_list args;
va_start(args, comment);
if (!::seqan::ClassTest::vtestLt("", 0, _arg1, "", _arg2, "", comment, args))
::seqan::ClassTest::fail();
va_end(args);
}
template <typename T1, typename T2>
void SEQAN_ASSERT_GEQ(T1 const & _arg1, T2 const & _arg2)
{
if (!::seqan::ClassTest::testGeq("", 0, _arg1, "", _arg2, ""))
::seqan::ClassTest::fail();
}
template <typename T1, typename T2>
void SEQAN_ASSERT_GEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{
va_list args;
va_start(args, comment);
if (!::seqan::ClassTest::vtestGeq("", 0, _arg1, "", _arg2, "", comment, args))
::seqan::ClassTest::fail();
va_end(args);
}
template <typename T1, typename T2>
void SEQAN_ASSERT_GT(T1 const & _arg1, T2 const & _arg2)
{
if (!::seqan::ClassTest::testGt("", 0, _arg1, "", _arg2, ""))
::seqan::ClassTest::fail();
}
template <typename T1, typename T2>
void SEQAN_ASSERT_GT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{
va_list args;
va_start(args, comment);
if (!::seqan::ClassTest::vtestGt("", 0, _arg1, "", _arg2, "", comment, args))
::seqan::ClassTest::fail();
va_end(args);
}
template <typename T1>
void SEQAN_ASSERT(T1 const & _arg1)
{
if (!::seqan::ClassTest::testTrue("", 0, _arg1, ""))
::seqan::ClassTest::fail();
}
template <typename T1>
void SEQAN_ASSERT_MSG(T1 const & _arg1, const char * comment, ...)
{
va_list args;
va_start(args, comment);
if (!::seqan::ClassTest::vtestTrue("", 0, _arg1, "", comment, args))
::seqan::ClassTest::fail();
va_end(args);
}
template <typename T1>
void SEQAN_ASSERT_NOT(T1 const & _arg1)
{
if (!::seqan::ClassTest::testFalse("", 0, _arg1, ""))
::seqan::ClassTest::fail();
}
template <typename T1>
void SEQAN_ASSERT_NOT_MSG(T1 const & _arg1, const char * comment, ...)
{
va_list args;
va_start(args, comment);
if (!::seqan::ClassTest::vtestFalse("", 0, _arg1, "", comment, args))
::seqan::ClassTest::fail();
va_end(args);
}
#else // #if SEQAN_ENABLE_DEBUG
inline void SEQAN_ASSERT_FAIL(const char * comment, ...) {}
template <typename T1, typename T2, typename T3>
void SEQAN_ASSERT_IN_DELTA(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3) {}
template <typename T1, typename T2, typename T3>
void SEQAN_ASSERT_IN_DELTA_MSG(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3, const char * comment, ...) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_EQ(T1 const & _arg1, T2 const & _arg2) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_EQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_NEQ(T1 const & _arg1, T2 const & _arg2) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_NEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_LEQ(T1 const & _arg1, T2 const & _arg2) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_LEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_LT(T1 const & _arg1, T2 const & _arg2) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_LT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_GEQ(T1 const & _arg1, T2 const & _arg2) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_GEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_GT(T1 const & _arg1, T2 const & _arg2) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_GT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {}
template <typename T1>
void SEQAN_ASSERT(T1 const & _arg1) {}
template <typename T1>
void SEQAN_ASSERT_MSG(T1 const & _arg1, const char * comment, ...) {}
template <typename T1>
void SEQAN_ASSERT_NOT(T1 const & _arg1) {}
template <typename T1>
void SEQAN_ASSERT_NOT_MSG(T1 const & _arg1, const char * comment, ...) {}
#endif // #if SEQAN_ENABLE_DEBUG
#endif // no variadic macros
// Returns a string (of type char*) with the path to the called binary.
//
// Use this to locate files relative to the test binary.
#define SEQAN_PROGRAM_PATH \
::seqan::ClassTest::StaticData::basePath()
// TODO(holtgrew): Subject to change with restructuring.
/**
.Macro.SEQAN_PATH_TO_ROOT
..cat:Testing & Debugging
..summary:Return path to the checkout root directory (i.e. containing core/extras).
..description.note:This only works when using the SeqAn SVN checkout!
..returns:$char const *$, string with the path to the parent directory of the tests directory.
..signature:SEQAN_PATH_TO_ROOT()
..remarks:The pointed to string is initialized on program startup by the code generated by @Macro.SEQAN_BEGIN_TESTSUITE@.
..example.code:
const char *p = SEQAN_PATH_TO_ROOT();
char buffer[1000];
strcpy(buffer, p);
strcat(buffer, "/tests/files/example.txt");
FILE *f = fopen(buffer, "w");
fprintf(f, "Test Data");
fclose(f);
..see:Macro.SEQAN_TEMP_FILENAME
*/
// Returns a const char * string with the path to the projects directory.
#define SEQAN_PATH_TO_ROOT() \
::seqan::ClassTest::StaticData::pathToRoot()
// Returns the POSIX int file handle to an open file.
// TODO(holtgrewe): Uncomment if openTempFile has been implemented.
// #define SEQAN_OPEN_TEMP_FILE() (::seqan::ClassTest::openTempFile())
/**
.Macro.SEQAN_TEMP_FILENAME
..cat:Testing & Debugging
..summary:Generates the name to a temporary file.
..returns:$char const *$, string with the path to a temporary file.
..signature:SEQAN_TEMP_FILENAME()
..remarks:The pointed to string is stored in a buffer and is overwritten by the next call to this macro. Copy it out if you need it.
..example.code:
const char *p = SEQAN_TEMP_FILENAME();
char tempFilename[1000];
strcpy(tempFilename, p);
FILE *f = fopen(tempFilename, "w");
fprintf(f, "Test Data");
fclose(f);
..see:Macro.SEQAN_PATH_TO_ROOT
*/
// Returns a temporary filename.
#define SEQAN_TEMP_FILENAME() (::seqan::ClassTest::tempFileName())
/**
.Macro.SEQAN_VERIFY_CHECKPOINTS
..cat:Testing & Debugging
..summary:Verify check points for the given file name.
..signature:SEQAN_VERIFY_CHECKPOINTS(path)
..param.path:Path to the file to verify check points for. Relative to parent directory of tests.
..example.code:
SEQAN_VERIFY_CHECKPOINTS("core/include/seqan/basic_alphabet.h");
..see:Macro.SEQAN_CHECKPOINT
.Macro.SEQAN_CHECKPOINT
..cat:Testing & Debugging
..summary:Generate a check point.
..signature:SEQAN_CHECKPOINT
..remarks:Whenever the code executes the instructions generated by this macro, the check point for this line will be set in global testing state. Use @Macro.SEQAN_VERIFY_CHECKPOINTS@ to verify whether all checkpoints have been reached in a file up to this point.
SEQAN_CHECKPOINT;
..see:Macro.SEQAN_VERIFY_CHECKPOINTS
*/
#if SEQAN_ENABLE_CHECKPOINTS
// Create a check point at the point where the macro is placed.
// TODO(holtgrew): Should be called SEQAN_CHECK_POINT to be consistent.
#define SEQAN_CHECKPOINT \
::seqan::ClassTest::registerCheckPoint(__LINE__, __FILE__);
// Call the check point verification code for the given file.
#define SEQAN_VERIFY_CHECKPOINTS(filename) \
::seqan::ClassTest::verifyCheckPoints(filename)
#else // #if SEQAN_ENABLE_CHECKPOINTS
#define SEQAN_CHECKPOINT
// If checkpoints are to be verified if testing is disabled then print
// a warning.
#define SEQAN_VERIFY_CHECKPOINTS(filename) \
do { \
fprintf(stderr, ("WARNING: Check point verification is " \
"disabled. Trying to verify %s from %s:%d.\n"), \
filename, __FILE__, __LINE__); \
} while (false)
#endif // #if SEQAN_ENABLE_CHECKPOINTS
#if !SEQAN_ENABLE_TESTING
#define SEQAN_BEGIN_TESTSUITE(suite_name) \
int main(int argc, char ** argv) { \
(void) argc; \
(void) argv; \
fprintf(stderr, "Warning: SEQAN_ENABLE_TESTING is wrong and you used the macro SEQAN_BEGIN_TESTSUITE!\n");
#define SEQAN_END_TESTSUITE \
return 0; \
}
#define SEQAN_CALL_TEST(test_name) do { SEQAN_TEST_ ## test_name(); } while (false)
#define SEQAN_SKIP_TEST do {} while (false)
#endif // #if !SEQAN_ENABLE_TESTING
} // namespace seqan
#endif // SEQAN_CORE_INCLUDE_SEQAN_BASIC_DEBUG_TEST_SYSTEM_H_
|
general_basis_op.h | #ifndef _GENERAL_BASIS_OP_H
#define _GENERAL_BASIS_OP_H
#include <iostream>
#include <complex>
#include <algorithm>
#include <limits>
#include "general_basis_core.h"
#include "numpy/ndarraytypes.h"
#include "misc.h"
#include "openmp.h"
namespace basis_general {
// Builds one COO-format matrix element per basis state for the operator
// string `opstr` (site indices `indx`, overall coupling A): on return,
// col[i] = i, row[i] = index of the state connected to basis[i], and M[i]
// holds the (real) matrix element. A NaN in M[i] (with row[i] = col[i] = i)
// marks a connection that leaves the symmetry block. Returns the first
// nonzero error code reported by B->op()/check_imag(), otherwise 0.
template<class I, class J, class K, class T,class P=signed char>
int general_op(general_basis_core<I,P> *B,
               const int n_op,
               const char opstr[],
               const int indx[],
               const std::complex<double> A,
               const bool full_basis,
               const npy_intp Ns,
               const I basis[],
               const J n[],
               K row[],
               K col[],
               T M[]
               )
{
    int err = 0;
    #pragma omp parallel
    {
        const int nt = B->get_nt();
        const npy_intp chunk = std::max(Ns/(1000*omp_get_num_threads()),(npy_intp)1);
        int g[__GENERAL_BASIS_CORE__max_nt]; // symmetry winding numbers, filled by ref_state()

        #pragma omp for schedule(dynamic,chunk)
        for(npy_intp i=0;i<Ns;i++){
            if(err != 0){
                continue; // another thread reported an error: drain remaining iterations
            }
            I r = basis[i];
            std::complex<double> m = A;
            int local_err = B->op(r,m,n_op,opstr,indx);
            if(local_err == 0){
                P sign = 1;
                for(int k=0;k<nt;k++){
                    g[k]=0;
                }
                // FIX: j must be a signed npy_intp (not K, which may be an
                // unsigned index type) so that the -1 "not found" result of
                // binary_search survives the j >= 0 test below. The sibling
                // routines (general_inplace_op) already use npy_intp here.
                npy_intp j = i;
                if(r != basis[i]){
                    I rr = B->ref_state(r,g,sign);
                    if(full_basis){
                        j = Ns - (npy_intp)rr - 1;
                    }
                    else{
                        j = binary_search(Ns,basis,rr);
                    }
                }
                if(j >= 0){
                    // Accumulate the Bloch phase of the symmetry sector.
                    for(int k=0;k<nt;k++){
                        double q = (2.0*M_PI*B->qs[k]*g[k])/B->pers[k];
                        m *= std::exp(std::complex<double>(0,-q));
                    }
                    m *= sign * std::sqrt(double(n[j])/double(n[i]));
                    local_err = check_imag(m,&M[i]);
                    col[i]=i;
                    row[i]=j;
                }
                else{ // connected state outside this symmetry block: NaN marker on the diagonal
                    col[i] = i;
                    row[i] = i;
                    M[i] = std::numeric_limits<T>::quiet_NaN();
                }
            }
            else{
                #pragma omp critical
                err = local_err;
            }
        }
    }
    return err;
}
// In-place action of the operator on `nvecs` stacked vectors:
//   v_out[out,:] += m * v_in[in,:]
// where (in,out) = (i,j) for the plain operator and (j,i) for the
// transpose, and the matrix element m is complex-conjugated when
// `conjugate` is true. Returns the first nonzero error code from
// B->op()/atomic_add(), otherwise 0.
//
// This replaces four nearly identical parallel regions (one per
// conjugate/transpose combination) with a single region; both flags are
// loop-invariant, so the extra per-element branches are negligible next to
// the op()/ref_state() work.
template<class I, class J, class K,class P=signed char>
int general_inplace_op(general_basis_core<I,P> *B,
                       const bool conjugate,
                       const bool transpose,
                       const int n_op,
                       const char opstr[],
                       const int indx[],
                       const std::complex<double> A,
                       const bool full_basis,
                       const npy_intp Ns,
                       const npy_intp nvecs,
                       const I basis[],
                       const J n[],
                       const K v_in[],
                       K v_out[])
{
    int err = 0;
    #pragma omp parallel
    {
        const int nt = B->get_nt();
        int g[__GENERAL_BASIS_CORE__max_nt]; // symmetry winding numbers, filled by ref_state()
        #pragma omp for schedule(static)
        for(npy_intp i=0;i<Ns;i++){
            if(err != 0){
                continue; // another thread reported an error: drain remaining iterations
            }
            I r = basis[i];
            std::complex<double> m = A;
            int local_err = B->op(r,m,n_op,opstr,indx);
            if(local_err == 0){
                P sign = 1;
                // FIX: clear g every iteration, exactly as general_op() does.
                // The original left g uninitialized on the diagonal path
                // (r == basis[i]), where the phase loop below still reads it.
                for(int k=0;k<nt;k++){
                    g[k]=0;
                }
                npy_intp j = i;
                if(r != basis[i]){
                    I rr = B->ref_state(r,g,sign);
                    if(full_basis){
                        j = Ns - (npy_intp)rr - 1;
                    }
                    else{
                        j = binary_search(Ns,basis,rr);
                    }
                }
                if(j >= 0){
                    // Accumulate the Bloch phase of the symmetry sector.
                    for(int k=0;k<nt;k++){
                        double q = (2.0*M_PI*B->qs[k]*g[k])/B->pers[k];
                        m *= std::exp(std::complex<double>(0,-q));
                    }
                    m *= sign * std::sqrt(double(n[j])/double(n[i]));
                    if(conjugate){
                        m = std::conj(m);
                    }
                    // For the transposed operator, rows and columns swap roles.
                    const K * v_in_col = v_in  + (transpose ? j : i) * nvecs;
                    K * v_out_row      = v_out + (transpose ? i : j) * nvecs;
                    for(npy_intp k=0;k<nvecs;k++){
                        const std::complex<double> ME = std::complex<double>(v_in_col[k]) * m;
                        local_err = atomic_add(ME,&v_out_row[k]);
                        if(local_err){
                            break;
                        }
                    }
                }
            }
            else{
                #pragma omp critical
                err = local_err;
            }
        }
    }
    return err;
}
// Computes matrix elements <bra|O|ket> for an explicit list of ket states
// (no pre-built basis lookup): for each ket[i] the connected representative
// state is written to bra[i] and the (real) matrix element to M[i]. A NaN
// in M[i] (with bra[i] = ket[i]) marks a connection whose reference state
// is not a representative. Returns the first nonzero error code reported
// by B->op()/check_imag(), otherwise 0.
template<class I, class T, class P=signed char>
int general_op_bra_ket(general_basis_core<I,P> *B,
const int n_op,
const char opstr[],
const int indx[],
const std::complex<double> A,
const npy_intp Ns,
const I ket[], // col
I bra[], // row
T M[]
)
{
int err = 0;
#pragma omp parallel
{
const int nt = B->get_nt();
int g[__GENERAL_BASIS_CORE__max_nt]; // symmetry winding numbers, filled by ref_state()
#pragma omp for schedule(static)
for(npy_intp i=0;i<Ns;i++){
if(err != 0){
continue; // another thread reported an error: drain remaining iterations
}
std::complex<double> m = A;
const I s = ket[i];
I r = ket[i];
int local_err = B->op(r,m,n_op,opstr,indx);
if(local_err == 0){
P sign = 1;
if(r != s){ // off-diagonal matrix element
// NOTE(review): unlike general_op(), g is not zeroed before this call;
// assumes ref_state() fully overwrites g — confirm against its definition.
r = B->ref_state(r,g,sign);
// use check_state to determine if state is a representative (same routine as in make-general_basis)
double norm_r = B->check_state(r);
if(!check_nan(norm_r) && norm_r > 0){ // ref_state is a representative
// Accumulate the Bloch phase of the symmetry sector.
for(int k=0;k<nt;k++){
double q = (2.0*M_PI*B->qs[k]*g[k])/B->pers[k];
m *= std::exp(std::complex<double>(0,-q));
}
double norm_s = B->check_state(s);
m *= sign * std::sqrt(norm_r/norm_s);
local_err = check_imag(m,&M[i]); // assigns value to M[i]
bra[i] = r;
}
else{ // ref state in different particle number sector
M[i] = std::numeric_limits<T>::quiet_NaN();
bra[i] = s;
}
}
else{ // diagonal matrix element
m *= sign;
local_err = check_imag(m,&M[i]); // assigns value to M[i]
bra[i] = s;
}
}
else{
#pragma omp critical
err = local_err;
}
}
}
return err;
}
// Same as general_op_bra_ket(), but additionally requires the connected
// reference state to stay inside one of the particle-conserving sectors in
// Np_set; states that leave the sector(s) are flagged with a NaN in M[i].
// Returns the first nonzero error code from B->op()/check_imag(), else 0.
template<class I, class T, class P=signed char>
int general_op_bra_ket_pcon(general_basis_core<I,P> *B,
                            const int n_op,
                            const char opstr[],
                            const int indx[],
                            const std::complex<double> A,
                            const npy_intp Ns,
                            const std::set<std::vector<int>> &Np_set, // array with particle conserving sectors
                            const I ket[], // col
                            I bra[], // row
                            T M[]
                            )
{
    int err = 0;
    #pragma omp parallel
    {
        // Thread-private copy avoids concurrent reads through a shared reference.
        const std::set<std::vector<int>> Np_set_local = Np_set;
        const int nt = B->get_nt();
        int g[__GENERAL_BASIS_CORE__max_nt]; // symmetry winding numbers, filled by ref_state()
        #pragma omp for schedule(static)
        for(npy_intp i=0;i<Ns;i++){
            if(err != 0){
                continue; // another thread reported an error: drain remaining iterations
            }
            std::complex<double> m = A;
            const I s = ket[i];
            I r = ket[i];
            int local_err = B->op(r,m,n_op,opstr,indx);
            if(local_err == 0){
                P sign = 1;
                if(r != s){ // off-diagonal matrix element
                    // FIX: clear g before ref_state(), as general_op() does.
                    for(int k=0;k<nt;k++){
                        g[k]=0;
                    }
                    r = B->ref_state(r,g,sign);
                    bool pcon_bool = B->check_pcon(r,Np_set_local);
                    if(pcon_bool){ // reference state within same particle-number sector(s)
                        // use check_state to determine if state is a representative (same routine as in make-general_basis)
                        double norm_r = B->check_state(r);
                        if(!check_nan(norm_r) && norm_r > 0){ // ref_state is a representative
                            for(int k=0;k<nt;k++){
                                double q = (2.0*M_PI*B->qs[k]*g[k])/B->pers[k];
                                m *= std::exp(std::complex<double>(0,-q));
                            }
                            double norm_s = B->check_state(s);
                            m *= sign * std::sqrt(norm_r/norm_s);
                            local_err = check_imag(m,&M[i]); // assigns value to M[i]
                            bra[i] = r;
                        }
                        else{ // ref_state not a representative
                            M[i] = std::numeric_limits<T>::quiet_NaN();
                            bra[i] = s;
                        }
                    }
                    else{ // ref state in different particle number sector
                        M[i] = std::numeric_limits<T>::quiet_NaN();
                        bra[i] = s;
                    }
                }
                else{ // diagonal matrix element
                    // FIX: removed the phase loop that read uninitialized g[]
                    // here (g is only filled by ref_state() on the off-diagonal
                    // path). Diagonal elements carry no symmetry phase — this
                    // now matches general_op_bra_ket().
                    m *= sign;
                    local_err = check_imag(m,&M[i]); // assigns value to M[i]
                    bra[i] = s;
                }
            }
            else{
                #pragma omp critical
                err = local_err;
            }
        }
    }
    return err;
}
}
#endif
|
test.c | #include <stdio.h>
#define NUM_DIVS 1000000
#define NUM_ITS 100

/*
 * Approximate pi by midpoint-rule integration of 4/(1+x^2) over [0,1],
 * using NUM_DIVS equal sub-intervals. The loop is parallelized with an
 * OpenMP sum reduction; the pragma is a no-op in serial builds.
 */
double calc_pi()
{
    const double width = 1.0 / NUM_DIVS;
    double total = 0.0;

    #pragma omp parallel for reduction(+: total)
    for (int k = 0; k < NUM_DIVS; k++)
    {
        double x = ((double)k + 0.5) * width;   /* midpoint of sub-interval k */
        double y = 4.0 / (1 + (x * x));         /* integrand at the midpoint */
        total += width * y;
    }
    return total;
}
/*
 * Driver: recomputes pi NUM_ITS times (a crude benchmark loop — every
 * iteration yields the same value) and prints the final result.
 */
int main(int argc, char** argv)
{
    int i = 0;
    double pi = 0.0;
    for(i = 0; i < NUM_ITS; i++)
    {
        pi = calc_pi();
    }
    printf("Pi: %1.16f\n", pi);
    return 0;
}
|
rose_regression01.c | /*
* Contributed by Jeff Keasler
*
* Liao 2/10/2010
* */
#include "omp.h"
typedef double real8;

/*
 * For each of the `len` elements, applies the 8x8 update
 *   a[col + (row + 8*elem)*8] += (b[elem]*c[(row+8*elem)*4])
 *                                * (d0*c1 + d1*c2 + d2*c3)   (per column)
 * where d0..d2 are the element's entries of d and c1..c3 the trailing
 * components of the packed 4-vectors in c. Pragmas preserved from the
 * ROSE source-to-source translator output.
 */
void foo(real8 *a,real8 *b,real8 *c,real8 *d,int len)
{
  int col;
  int row;
  int elem;
  for (elem = 0; elem < len; elem++) {
    const int base8 = elem * 8;
    const real8 dx = d[elem * 3 + 0];
    const real8 dy = d[elem * 3 + 1];
    const real8 dz = d[elem * 3 + 2];
    const real8 scale = b[elem];
    real8 partial[8];
    /* Column-wise weighted sum of the last three c components. */
    #pragma omp parallel for private (col) firstprivate (dx,dy,dz)
    for (col = 0; col < 8; col++) {
      partial[col] = dx * c[(col + base8) * 4 + 1] + dy * c[(col + base8) * 4 + 2] + dz * c[(col + base8) * 4 + 3];
    }
    #pragma omp parallel for private (col,row) firstprivate (base8,scale)
    for (row = 0; row < 8; row++) {
      real8 w = scale * c[(row + base8) * 4];
      #pragma omp parallel for private (col) firstprivate (w)
      for (col = 0; col < 8; col++) {
        a[col + (row + base8) * 8] += w * partial[col];
      }
    }
  }
}
|
morphology.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y %
% MM MM O O R R P P H H O O L O O G Y Y %
% M M M O O RRRR PPPP HHHHH O O L O O G GGG Y %
% M M O O R R P H H O O L O O G G Y %
% M M OOO R R P H H OOO LLLLL OOO GGG Y %
% %
% %
% MagickCore Morphology Methods %
% %
% Software Design %
% Anthony Thyssen %
% January 2010 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size or shape, to an
% image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology. Just one that is very common for image bluring and sharpening
% effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring.
%
% This module provides not only a general morphology function, and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user supplied
% arguments. Perhaps even the generation of a kernel from a small image.
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/prepress.h"
#include "MagickCore/quantize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/registry.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
/*
Other global definitions used by module.
*/
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)
/* Integer Factorial Function - for a Binomial kernel */
#if 1
/* Iterative integer factorial: returns n! (1 for n <= 1).
   Note: overflows size_t for n > 20 on 64-bit platforms. */
static inline size_t fact(size_t n)
{
  size_t result = 1;
  size_t k;
  for (k = n; k > 1; k--)
    result *= k;
  return(result);
}
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif
/* Currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
ExpandMirrorKernelInfo(KernelInfo *),
ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
/* Quick function to find last kernel in a kernel list */
/* Walks the singly linked ->next chain and returns its final node.
   The argument must be non-NULL; the list is assumed to be NULL-terminated. */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
  while (kernel->next != (KernelInfo *) NULL)
    kernel=kernel->next;
  return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelInfo() takes the given string (generally supplied by the
% user) and converts it into a Morphology/Convolution Kernel. This allows
% users to specify a kernel from a number of pre-defined kernels, or to fully
% specify their own kernel for a specific Convolution or Morphology
% Operation.
%
% The kernel so generated can be any rectangular array of floating point
% values (doubles) with the 'control point' or 'pixel being affected'
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
% center as origin, this is no longer the case, and any rectangular kernel
% with any value being declared the origin. This in turn allows the use of
% highly asymmetrical kernels.
%
% The floating point values in the kernel can also include a special value
% known as 'nan' or 'not a number' to indicate that this value is not part
% of the kernel array. This allows you to shaped the kernel within its
% rectangular area. That is 'nan' values provide a 'mask' for the kernel
% shape. However at least one non-nan value must be provided for correct
% working of a kernel.
%
% The returned kernel should be freed using the DestroyKernelInfo() when you
% are finished with it. Do not free this memory yourself.
%
% Input kernel definition strings can consist of any of three types.
%
% "name:args[[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
% "WxH[+X+Y][@><]:num, num, num ..."
% a kernel of size W by H, with W*H floating point numbers following.
% the 'center' can be optionally be defined at +X+Y (such that +0+0
% is top left corner). If not defined the pixel in the center, for
% odd sizes, or to the immediate top or left of center for even sizes
% is automatically selected.
%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels' which can be used by some morphology
% operators A list is defined as a semi-colon separated list kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at start, end or between kernel defintions are
% simply ignored.
%
% The special flags will expand a single kernel, into a list of rotated
% kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also expands using 90-degree rotates, but giving a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style of a simple list of
% numbers generating a odd-sized square kernel has been given.
%
% The format of the AcquireKernelInfo method is:
%
% KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
% o kernel_string: the Morphology/Convolution kernel wanted.
%
*/
/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
*/
/*
** ParseKernelArray() parses a user-defined kernel array specification:
** either the new-style "WxH[+X+Y][@><]:v,v,v,..." form (geometry plus
** values), or the old-style plain list of numbers forming an odd-sized
** square kernel.  Values of "nan" or "-" mark elements that are not part
** of the kernel neighbourhood.
**
** Returns a freshly allocated KernelInfo, or NULL on allocation failure
** or a malformed/incomplete specification (via DestroyKernelInfo).
*/
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
  KernelInfo
    *kernel;
  char
    token[MagickPathExtent];
  const char
    *p,
    *end;
  register ssize_t
    i;
  double
    nan = sqrt((double)-1.0);  /* Special Value : Not A Number */
  MagickStatusType
    flags;
  GeometryInfo
    args;
  size_t
    length;
  kernel=(KernelInfo *) AcquireQuantumMemory(1,sizeof(*kernel));
  if (kernel == (KernelInfo *) NULL)
    return(kernel);
  (void) memset(kernel,0,sizeof(*kernel));
  kernel->minimum = kernel->maximum = kernel->angle = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  kernel->type = UserDefinedKernel;
  kernel->next = (KernelInfo *) NULL;
  kernel->signature=MagickCoreSignature;
  if (kernel_string == (const char *) NULL)
    return(kernel);  /* NULL input yields an empty user-defined kernel */
  /* find end of this specific kernel definition string */
  end = strchr(kernel_string, ';');
  if ( end == (char *) NULL )
    end = strchr(kernel_string, '\0');
  /* clear flags - for Expanding kernel lists through rotations */
  flags = NoValue;
  /* Has a ':' in argument - New user kernel specification
     FUTURE: this split on ':' could be done by StringToken()
  */
  p = strchr(kernel_string, ':');
  if ( p != (char *) NULL && p < end)
    {
      /* ParseGeometry() needs the geometry separated! -- Arrgghh */
      /* clamp the copy length so an over-long geometry prefix cannot
      ** overflow the fixed-size token buffer (CERT STR31-C) */
      length=(size_t) (p-kernel_string);
      if (length > (size_t) (MagickPathExtent-1))
        length=(size_t) (MagickPathExtent-1);
      memcpy(token, kernel_string, length);
      token[length] = '\0';
      SetGeometryInfo(&args);
      flags = ParseGeometry(token, &args);
      /* Size handling and checks of geometry settings */
      if ( (flags & WidthValue) == 0 )          /* if no width then */
        args.rho = args.sigma;                  /* then  width = height */
      if ( args.rho < 1.0 )                     /* if width too small */
        args.rho = 1.0;                         /* then  width = 1 */
      if ( args.sigma < 1.0 )                   /* if height too small */
        args.sigma = args.rho;                  /* then  height = width */
      kernel->width = (size_t)args.rho;
      kernel->height = (size_t)args.sigma;
      /* Offset Handling and Checks */
      if ( args.xi < 0.0 || args.psi < 0.0 )
        return(DestroyKernelInfo(kernel));
      /* default origin is the center pixel (top-left of center for even) */
      kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
                                        : (ssize_t) (kernel->width-1)/2;
      kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
                                        : (ssize_t) (kernel->height-1)/2;
      if ( kernel->x >= (ssize_t) kernel->width ||
           kernel->y >= (ssize_t) kernel->height )
        return(DestroyKernelInfo(kernel));
      p++; /* advance beyond the ':' */
    }
  else
    { /* ELSE - Old old specification, forming odd-square kernel */
      /* count up number of values given */
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
      for (i=0; p < end; i++)
      {
        GetNextToken(p,&p,MagickPathExtent,token);
        if (*token == ',')
          GetNextToken(p,&p,MagickPathExtent,token);
      }
      /* set the size of the kernel - old sized square */
      kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
      kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
    }
  /* Read in the kernel values from rest of input string argument */
  kernel->values=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel->width,kernel->height*sizeof(*kernel->values)));
  if (kernel->values == (MagickRealType *) NULL)
    return(DestroyKernelInfo(kernel));
  kernel->minimum=MagickMaximumValue;
  kernel->maximum=(-MagickMaximumValue);
  kernel->negative_range = kernel->positive_range = 0.0;
  for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
  {
    GetNextToken(p,&p,MagickPathExtent,token);
    if (*token == ',')
      GetNextToken(p,&p,MagickPathExtent,token);
    if (    LocaleCompare("nan",token) == 0
        || LocaleCompare("-",token) == 0 ) {
      kernel->values[i] = nan; /* this value is not part of neighbourhood */
    }
    else {
      kernel->values[i] = StringToDouble(token,(char **) NULL);
      /* track ranges and extrema as the values are read */
      ( kernel->values[i] < 0)
          ?  ( kernel->negative_range += kernel->values[i] )
          :  ( kernel->positive_range += kernel->values[i] );
      Minimize(kernel->minimum, kernel->values[i]);
      Maximize(kernel->maximum, kernel->values[i]);
    }
  }
  /* sanity check -- no more values in kernel definition */
  GetNextToken(p,&p,MagickPathExtent,token);
  if ( *token != '\0' && *token != ';' && *token != '\'' )
    return(DestroyKernelInfo(kernel));
#if 0
  /* this was the old method of handling a incomplete kernel */
  if ( i < (ssize_t) (kernel->width*kernel->height) ) {
    Minimize(kernel->minimum, kernel->values[i]);
    Maximize(kernel->maximum, kernel->values[i]);
    for ( ; i < (ssize_t) (kernel->width*kernel->height); i++)
      kernel->values[i]=0.0;
  }
#else
  /* Number of values for kernel was not enough - Report Error */
  if ( i < (ssize_t) (kernel->width*kernel->height) )
    return(DestroyKernelInfo(kernel));
#endif
  /* check that we received at least one real (non-nan) value! */
  if (kernel->minimum == MagickMaximumValue)
    return(DestroyKernelInfo(kernel));
  if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel size */
    ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
  else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
    ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
  else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
    ExpandMirrorKernelInfo(kernel);       /* 90 degree mirror rotate */
  return(kernel);
}
/*
** ParseKernelName() parses a 'named' built-in kernel specification of
** the form "Name[:geometry-args]", fills in per-type argument defaults,
** and hands off to AcquireKernelBuiltIn().  Any '@', '>' or '<' geometry
** flag expands a single result kernel into a rotated kernel list.
**
** Returns NULL when the leading token is not a valid built-in kernel
** name, or when AcquireKernelBuiltIn() fails.
*/
static KernelInfo *ParseKernelName(const char *kernel_string,
  ExceptionInfo *exception)
{
  char
    token[MagickPathExtent];
  const char
    *p,
    *end;
  GeometryInfo
    args;
  KernelInfo
    *kernel;
  MagickStatusType
    flags;
  size_t
    length;
  ssize_t
    type;
  /* Parse special 'named' kernel */
  GetNextToken(kernel_string,&p,MagickPathExtent,token);
  type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
  if ( type < 0 || type == UserDefinedKernel )
    return((KernelInfo *) NULL);  /* not a valid named kernel */
  /* skip the separator between the name and its arguments */
  while (((isspace((int) ((unsigned char) *p)) != 0) ||
          (*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
    p++;
  end = strchr(p, ';');  /* end of this kernel definition */
  if ( end == (char *) NULL )
    end = strchr(p, '\0');
  /* ParseGeometry() needs the geometry separated! -- Arrgghh */
  /* clamp the copy length so an over-long argument string cannot
  ** overflow the fixed-size token buffer (CERT STR31-C) */
  length=(size_t) (end-p);
  if (length > (size_t) (MagickPathExtent-1))
    length=(size_t) (MagickPathExtent-1);
  memcpy(token, p, length);
  token[length] = '\0';
  SetGeometryInfo(&args);
  flags = ParseGeometry(token, &args);
#if 0
  /* For Debugging Geometry Input */
  (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
    flags, args.rho, args.sigma, args.xi, args.psi );
#endif
  /* special handling of missing values in input string */
  switch( type ) {
    /* Shape Kernel Defaults */
    case UnityKernel:
      if ( (flags & WidthValue) == 0 )
        args.rho = 1.0;    /* Default scale = 1.0, zero is valid */
      break;
    case SquareKernel:
    case DiamondKernel:
    case OctagonKernel:
    case DiskKernel:
    case PlusKernel:
    case CrossKernel:
      if ( (flags & HeightValue) == 0 )
        args.sigma = 1.0;    /* Default scale = 1.0, zero is valid */
      break;
    case RingKernel:
      if ( (flags & XValue) == 0 )
        args.xi = 1.0;       /* Default scale = 1.0, zero is valid */
      break;
    case RectangleKernel:    /* Rectangle - set size defaults */
      if ( (flags & WidthValue) == 0 ) /* if no width then */
        args.rho = args.sigma;         /* then  width = height */
      if ( args.rho < 1.0 )            /* if width too small */
          args.rho = 3;                /* then  width = 3 */
      if ( args.sigma < 1.0 )          /* if height too small */
        args.sigma = args.rho;         /* then  height = width */
      if ( (flags & XValue) == 0 )     /* center offset if not defined */
        args.xi = (double)(((ssize_t)args.rho-1)/2);
      if ( (flags & YValue) == 0 )
        args.psi = (double)(((ssize_t)args.sigma-1)/2);
      break;
    /* Distance Kernel Defaults */
    case ChebyshevKernel:
    case ManhattanKernel:
    case OctagonalKernel:
    case EuclideanKernel:
      if ( (flags & HeightValue) == 0 )           /* no distance scale */
        args.sigma = 100.0;                       /* default distance scaling */
      else if ( (flags & AspectValue ) != 0 )     /* '!' flag */
        args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
      else if ( (flags & PercentValue ) != 0 )    /* '%' flag */
        args.sigma *= QuantumRange/100.0;         /* percentage of color range */
      break;
    default:
      break;
  }
  kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args, exception);
  if ( kernel == (KernelInfo *) NULL )
    return(kernel);
  /* global expand to rotated kernel list - only for single kernels */
  if ( kernel->next == (KernelInfo *) NULL ) {
    if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 45.0);
    else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 90.0);
    else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
      ExpandMirrorKernelInfo(kernel);
  }
  return(kernel);
}
/*
** AcquireKernelInfo() parses a kernel specification string into a
** (possibly multi-kernel) KernelInfo list.  Semicolons separate the
** kernels in the list; a leading '@' means the specification is read
** from the named file.  A NULL string yields the default (empty)
** user-defined kernel from ParseKernelArray().
**
** Returns NULL if any kernel in the list fails to parse; any kernels
** already built and the file buffer (if any) are released first.
*/
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string,
  ExceptionInfo *exception)
{
  KernelInfo
    *kernel,
    *new_kernel;
  char
    *kernel_cache,
    token[MagickPathExtent];
  const char
    *p;
  if (kernel_string == (const char *) NULL)
    return(ParseKernelArray(kernel_string));
  p=kernel_string;
  kernel_cache=(char *) NULL;
  if (*kernel_string == '@')
    {
      /* '@filename' : read the kernel definition(s) from a file */
      kernel_cache=FileToString(kernel_string+1,~0UL,exception);
      if (kernel_cache == (char *) NULL)
        return((KernelInfo *) NULL);
      p=(const char *) kernel_cache;
    }
  kernel=NULL;
  while (GetNextToken(p,(const char **) NULL,MagickPathExtent,token), *token != '\0')
  {
    /* ignore extra or multiple ';' kernel separators */
    if (*token != ';')
      {
        /* tokens starting with alpha is a Named kernel */
        if (isalpha((int) ((unsigned char) *token)) != 0)
          new_kernel=ParseKernelName(p,exception);
        else /* otherwise a user defined kernel array */
          new_kernel=ParseKernelArray(p);
        /* any failure aborts the whole list */
        if (new_kernel == (KernelInfo *) NULL)
          {
            if (kernel != (KernelInfo *) NULL)
              kernel=DestroyKernelInfo(kernel);
            /* fix: release the file buffer too - was leaked on this path */
            if (kernel_cache != (char *) NULL)
              kernel_cache=DestroyString(kernel_cache);
            return((KernelInfo *) NULL);
          }
        /* initialise or append the kernel list */
        if (kernel == (KernelInfo *) NULL)
          kernel=new_kernel;
        else
          LastKernelInfo(kernel)->next=new_kernel;
      }
    /* look for the next kernel in list */
    p=strchr(p,';');
    if (p == (char *) NULL)
      break;
    p++;
  }
  if (kernel_cache != (char *) NULL)
    kernel_cache=DestroyString(kernel_cache);
  return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l B u i l t I n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelBuiltIn() returned one of the 'named' built-in types of
% kernels used for special purposes such as gaussian blurring, skeleton
% pruning, and edge distance determination.
%
% They take a KernelType, and a set of geometry style arguments, which were
% typically decoded from a user supplied string, or from a more complex
% Morphology Method that was requested.
%
% The format of the AcquireKernelBuiltIn method is:
%
% KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
% const GeometryInfo args)
%
% A description of each parameter follows:
%
% o type: the pre-defined type of kernel wanted
%
% o args: arguments defining or modifying the kernel
%
% Convolution Kernels
%
% Unity
% The a No-Op or Scaling single element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimensional gaussian kernel, as used by -gaussian.
% The sigma for the curve is required. The resulting kernel is
% normalized,
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% NOTE: that the 'radius' is optional, but if provided can limit (clip)
% the final size of the resulting kernel to a square 2*radius+1 in size.
%    The radius should be at least 2 times that of the sigma value, or
%    severe clipping and aliasing may result.  If not given or set to 0 the
%    radius will be determined so as to produce the best minimal error
%    result, which is usually much larger than is normally needed.
%
% LoG:{radius},{sigma}
% "Laplacian of a Gaussian" or "Mexician Hat" Kernel.
% The supposed ideal edge detection, zero-summing kernel.
%
%       An alternative to this kernel is to use a "DoG" with a sigma ratio of
%       approx 1.6 (according to wikipedia).
%
% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1 dimensional or linear gaussian blur, at the angle given
% (current restricted to orthogonal angles). If a 'radius' is given the
% kernel is clipped to a width of 2*radius+1. Kernel can be rotated
% by a 90 degree angle.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
% each other, is equivalent to a far larger "Gaussian" kernel with the
% same sigma value, However it is much faster to apply. This is how the
% "-blur" operator actually works.
%
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
% a comet like trail. The Kernel is actually half a gaussian curve,
% Adding two such blurs in opposite directions produces a Blur Kernel.
% Angle can be rotated in multiples of 90 degrees.
%
% Note that the first argument is the width of the kernel and not the
% radius of the kernel.
%
% Binomial:[{radius}]
%       Generate a discrete kernel using a 2 dimensional Pascal's Triangle
%       of values.  Used for special forms of image filters.
%
% # Still to be implemented...
% #
% # Filter2D
% # Filter1D
% # Set kernel values using a resize filter, and given scale (sigma)
% # Cylindrical or Linear. Is this possible with an image?
% #
%
% Named Constant Convolution Kernels
%
% All these are unscaled, zero-summing kernels by default. As such for
% non-HDRI version of ImageMagick some form of normalization, user scaling,
% and biasing the results is recommended, to prevent the resulting image
% being 'clipped'.
%
% The 3x3 kernels (most of these) can be circularly rotated in multiples of
% 45 degrees to generate the 8 angled varients of each of the kernels.
%
% Laplacian:{type}
%       Discrete Laplacian Kernels, (without normalization)
%       Type 0 :  3x3 with center:8 surrounded by -1  (8 neighbourhood)
% Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
% Type 2 : 3x3 with center:4 edge:1 corner:-2
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
% Type 15 : 5x5 LoG (sigma approx 1.4)
% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
% | -1, 0, 1 |
% | -2, 0,-2 |
% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% | -1, 1, 0 |
% | 0, 0, 0 |
%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
% | -1, 0, 1 |
% | -1, 0, 1 |
% | -1, 0, 1 |
%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
% | -1, 1, 1 |
% | -1,-2, 1 |
% | -1, 1, 1 |
%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
% | -3,-3, 5 |
% | -3, 0, 5 |
% | -3,-3, 5 |
%
% FreiChen:{angle}
% Frei-Chen Edge Detector is based on a kernel that is similar to
% the Sobel Kernel, but is designed to be isotropic. That is it takes
% into account the distance of the diagonal in the kernel.
%
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) |
% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
%
% Frei-Chen Pre-weighted kernels...
%
% Type 0: default un-nomalized version shown above.
%
% Type 1: Orthogonal Kernel (same as type 11 below)
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 0, -sqrt(2) -1 |
%
% However this kernel is als at the heart of the FreiChen Edge Detection
% Process which uses a set of 9 specially weighted kernel. These 9
% kernels not be normalized, but directly applied to the image. The
% results is then added together, to produce the intensity of an edge in
% a specific direction. The square root of the pixel value can then be
% taken as the cosine of the edge, and at least 2 such runs at 90 degrees
% from each other, both the direction and the strength of the edge can be
% determined.
%
% Type 10: All 9 of the following pre-weighted kernels...
%
% Type 11: | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 12: | 1, sqrt(2), 1 |
% | 0, 0, 0 | / 2*sqrt(2)
% | 1, sqrt(2), 1 |
%
% Type 13: | sqrt(2), -1, 0 |
% | -1, 0, 1 | / 2*sqrt(2)
% | 0, 1, -sqrt(2) |
%
% Type 14: | 0, 1, -sqrt(2) |
% | -1, 0, 1 | / 2*sqrt(2)
% | sqrt(2), -1, 0 |
%
% Type 15: | 0, -1, 0 |
% | 1, 0, 1 | / 2
% | 0, -1, 0 |
%
% Type 16: | 1, 0, -1 |
% | 0, 0, 0 | / 2
% | -1, 0, 1 |
%
% Type 17: | 1, -2, 1 |
% | -2, 4, -2 | / 6
% | -1, -2, 1 |
%
% Type 18: | -2, 1, -2 |
% | 1, 4, 1 | / 6
% | -2, 1, -2 |
%
% Type 19: | 1, 1, 1 |
% | 1, 1, 1 | / 3
% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection
% and the last is to add a average component to the results.
%
% Using a special type of '-1' will return all 9 pre-weighted kernels
% as a multi-kernel list, so that you can use them directly (without
% normalization) with the special "-set option:morphology:compose Plus"
% setting to apply the full FreiChen Edge Detection Technique.
%
% If 'type' is large it will be taken to be an actual rotation angle for
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
%
% WARNING: The above was layed out as per
% http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
% But rotated 90 degrees so direction is from left rather than the top.
% I have yet to find any secondary confirmation of the above. The only
% other source found was actual source code at
% http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
% Neither paper defines the kernels in a way that looks logical or
% correct when taken as a whole.
%
% Boolean Kernels
%
% Diamond:[{radius}[,{scale}]]
% Generate a diamond shaped kernel with given radius to the points.
% Kernel size will again be radius*2+1 square and defaults to radius 1,
% generating a 3x3 kernel that is slightly larger than a square.
%
% Square:[{radius}[,{scale}]]
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
% Octagon:[{radius}[,{scale}]]
% Generate octagonal shaped kernel of given radius and constant scale.
% Default radius is 3 producing a 7x7 kernel. A radius of 1 will result
% in "Diamond" kernel.
%
% Disk:[{radius}[,{scale}]]
% Generate a binary disk, thresholded at the radius given, the radius
% may be a float-point value. Final Kernel size is floor(radius)*2+1
% square. A radius of 5.3 is the default.
%
% NOTE: That a low radii Disk kernels produce the same results as
% many of the previously defined kernels, but differ greatly at larger
% radii. Here is a table of equivalences...
% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
% "Disk:1.5" => "Square"
% "Disk:2" => "Diamond:2"
% "Disk:2.5" => "Octagon"
% "Disk:2.9" => "Square:2"
% "Disk:3.5" => "Octagon:3"
% "Disk:4.5" => "Octagon:4"
% "Disk:5.4" => "Octagon:5"
% "Disk:6.4" => "Octagon:6"
% All other Disk shapes are unique to this kernel, but because a "Disk"
% is more circular when using a larger radius, using a larger radius is
% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
% specify the location of the 'control point', otherwise the closest
% pixel to the center of the rectangle is selected.
%
% Properly centered and odd sized rectangles work the best.
%
% Symbol Dilation Kernels
%
% These kernel is not a good general morphological kernel, but is used
% more for highlighting and marking any single pixels in an image using,
% a "Dilate" method as appropriate.
%
% For the same reasons iterating these kernels does not produce the
% same result as using a larger radius for the symbol.
%
% Plus:[{radius}[,{scale}]]
% Cross:[{radius}[,{scale}]]
% Generate a kernel in the shape of a 'plus' or a 'cross' with
% a each arm the length of the given radius (default 2).
%
% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
%       Defaults to a ring of approximately 3 radius in a 7x7 kernel.
% This is the 'edge' pixels of the default "Disk" kernel,
% More specifically, "Ring" -> "Ring:2.5,3.5,1.0"
%
% Hit and Miss Kernels
%
% Peak:radius1,radius2
% Find any peak larger than the pixels the fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
% Diagonals:type
% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
%       Find end points of lines (for pruning a skeleton)
%       Two types of line ends (default to both) can be searched for
%         Type 0: All line ends
%         Type 1: single kernel for 4-connected line ends
% Type 2: single kernel for simple line ends
% LineJunctions
%       Find three line junctions (within a skeleton)
% Type 0: all line junctions
% Type 1: Y Junction kernel
% Type 2: Diagonal T Junction kernel
% Type 3: Orthogonal T Junction kernel
% Type 4: Diagonal X Junction kernel
% Type 5: Orthogonal + Junction kernel
% Ridges:type
% Find single pixel ridges or thin lines
% Type 1: Fine single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
%       Type 1: Traditional Skeleton kernel (4 connected skeleton)
%       Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
%       Type 3: Thinning skeleton based on a research paper by
%               Dan S. Bloomberg (Default Type)
%    ThinSE:type
%       A huge variety of Thinning Kernels designed to preserve connectivity.
% many other kernel sets use these kernels as source definitions.
% Type numbers are 41-49, 81-89, 481, and 482 which are based on
% the super and sub notations used in the source research paper.
%
% Distance Measuring Kernels
%
% Different types of distance measuring methods, which are used with the
% a 'Distance' morphology method for generating a gradient based on
% distance from an edge of a binary shape, though there is a technique
% for handling a anti-aliased shape.
%
% See the 'Distance' Morphological Method, for information of how it is
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
%       is a value of one to any neighbour, orthogonal or diagonal.  One way
%       of thinking of it is the number of squares a 'King' or 'Queen' in
%       chess needs to traverse to reach any other position on a chess board.
% It results in a 'square' like distance function, but one where
% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
% Cab distance metric), it is the distance needed when you can only
% travel in horizontal or vertical directions only. It is the
% distance a 'Rook' in chess would have to travel, and results in a
% diamond like distances, where diagonals are further than expected.
%
% Octagonal:[{radius}][x{scale}[%!]]
%       An interleaving of Manhattan and Chebyshev metrics producing an
% increasing octagonally shaped distance. Distances matches those of
% the "Octagon" shaped kernel of the same radius. The minimum radius
% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
%       Euclidean distance is the 'direct' or 'as the crow flies' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
% you will get octagonal like distance function.
%
% However using a larger radius such as "Euclidean:4" you will get a
% much smoother distance gradient from the edge of the shape. Especially
% if the image is pre-processed to include any anti-aliasing pixels.
% Of course a larger kernel is slower to use, and not always needed.
%
% The first three Distance Measuring Kernels will only generate distances
% of exact multiples of {scale} in binary images. As such you can use a
% of exact multiples of {scale} in binary images.  As such you can use a
% scale of 1 without losing any information.  However you also need some
% scaling when handling non-binary anti-aliased shapes.
%
% The "Euclidean" Distance Kernel however does generate a non-integer
% fractional results, and as such scaling is vital even for binary shapes.
%
*/
MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
const GeometryInfo *args,ExceptionInfo *exception)
{
KernelInfo
*kernel;
register ssize_t
i;
register ssize_t
u,
v;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
/* Generate a new empty kernel if needed */
kernel=(KernelInfo *) NULL;
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
assert("Should not call this function" != (char *) NULL);
break;
case LaplacianKernel: /* Named Descrete Convolution Kernels */
case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case BlurKernel:
case CometKernel:
case BinomialKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
case OctangonalKernel:
case EuclideanKernel:
#else
default:
#endif
/* Generate the base Kernel Structure */
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) memset(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = type;
kernel->next = (KernelInfo *) NULL;
kernel->signature=MagickCoreSignature;
break;
}
switch(type) {
/*
Convolution Kernels
*/
case UnityKernel:
{
kernel->height = kernel->width = (size_t) 1;
kernel->x = kernel->y = (ssize_t) 0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(1,sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
kernel->maximum = kernel->values[0] = args->rho;
break;
}
break;
case GaussianKernel:
case DoGKernel:
case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
A, B, R;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
* How to do this is I don't know, but appears to be basied on the
* Error Function 'erf()' (intergral of a gaussian)
*/
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
if ( type == LoGKernel )
{ /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
kernel->values[i] = (1-R)*exp(-R)*B;
}
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
/* Note the above kernels may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, and thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 2D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
break;
}
case BlurKernel:
{ double
sigma = fabs(args->sigma),
alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else
kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
#if 1
#define KernelRank 3
/* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix).
** It generates a gaussian 3 times the width, and compresses it into
** the expected range. This produces a closer normalization of the
** resulting kernel, especially for very low sigma values.
** As such while wierd it is prefered.
**
** I am told this method originally came from Photoshop.
**
** A properly normalized curve is generated (apart from edge clipping)
** even though we later normalize the result (for edge clipping)
** to allow the correct generation of a "Difference of Blurs".
*/
/* initialize */
v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */
(void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
#else
/* Direct calculation without curve averaging
This is equivelent to a KernelRank of 1 */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
{ alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, as a
** result of not generating a actual 'discrete' kernel, and thus
** producing a very bright 'impulse'.
**
  ** Because of these two factors Normalization is required!
*/
/* Normalize the 1D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
{ double
sigma = fabs(args->sigma),
A;
if ( args->rho < 1.0 )
kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1;
else
kernel->width = (size_t)args->rho;
kernel->x = kernel->y = 0;
kernel->height = 1;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* A comet blur is half a 1D gaussian curve, so that the object is
** blurred in one direction only. This may not be quite the right
** curve to use so may change in the future. The function must be
** normalised after generation, which also resolves any clipping.
**
** As we are normalizing and not subtracting gaussians,
** there is no need for a divisor in the gaussian formula
**
  ** It is less complex
*/
if ( sigma > MagickEpsilon )
{
#if 1
#define KernelRank 3
v = (ssize_t) kernel->width*KernelRank; /* start/end points */
(void) memset(kernel->values,0, (size_t)
kernel->width*sizeof(*kernel->values));
sigma *= KernelRank; /* simplify the loop expression */
A = 1.0/(2.0*sigma*sigma);
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( u=0; u < v; u++) {
kernel->values[u/KernelRank] +=
exp(-((double)(u*u))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
}
for (i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range += kernel->values[i];
#else
A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range +=
kernel->values[i] = exp(-((double)(i*i))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
#endif
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
}
kernel->minimum = 0.0;
kernel->maximum = kernel->values[0];
kernel->negative_range = 0.0;
ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */
RotateKernelInfo(kernel, args->xi); /* Rotate by angle */
break;
}
case BinomialKernel:
{
size_t
order_f;
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
order_f = fact(kernel->width-1);
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=0; v < (ssize_t)kernel->height; v++)
{ size_t
alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) );
for ( u=0; u < (ssize_t)kernel->width; u++, i++)
kernel->positive_range += kernel->values[i] = (double)
(alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) ));
}
kernel->minimum = 1.0;
kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width];
kernel->negative_range = 0.0;
break;
}
/*
Convolution Kernels - Well Known Named Constant Kernels
*/
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
default: /* laplacian square filter -- default */
kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1");
break;
case 1: /* laplacian diamond filter */
kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0");
break;
case 2:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
break;
case 3:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
break;
case 5: /* a 5x5 laplacian */
kernel=ParseKernelArray(
"5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4");
break;
case 7: /* a 7x7 laplacian */
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
"9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case SobelKernel:
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case PrewittKernel:
{
kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case CompassKernel:
{
kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case KirschKernel:
{
kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case FreiChenKernel:
/* Direction is set to be left to right positive */
/* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
/* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
case 2:
kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = kernel->values[3]= +(MagickRealType) MagickSQ2;
kernel->values[5] = kernel->values[7]= -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 10:
{
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
break;
}
case 1:
case 11:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = +(MagickRealType) MagickSQ2;
kernel->values[7] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[0] = +(MagickRealType) MagickSQ2;
kernel->values[8] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[2] = -(MagickRealType) MagickSQ2;
kernel->values[6] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 16:
kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 17:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
}
if ( fabs(args->sigma) >= MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
RotateKernelInfo(kernel, args->sigma);
else if ( args->rho > 30.0 || args->rho < -30.0 )
/* Rotate by out of bounds 'type' */
RotateKernelInfo(kernel, args->rho);
break;
}
/*
Boolean or Shaped Kernels
*/
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case SquareKernel:
case RectangleKernel:
{ double
scale;
if ( type == SquareKernel )
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = (size_t) (2*args->rho+1);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
scale = args->sigma;
}
else {
/* NOTE: user defaults set in "AcquireKernelInfo()" */
if ( args->rho < 1.0 || args->sigma < 1.0 )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->width = (size_t)args->rho;
kernel->height = (size_t)args->sigma;
if ( args->xi < 0.0 || args->xi > (double)kernel->width ||
args->psi < 0.0 || args->psi > (double)kernel->height )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->x = (ssize_t) args->xi;
kernel->y = (ssize_t) args->psi;
scale = 1.0;
}
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values to scale given */
u=(ssize_t) (kernel->width*kernel->height);
for ( i=0; i < u; i++)
kernel->values[i] = scale;
kernel->minimum = kernel->maximum = scale; /* a flat shape */
kernel->positive_range = scale*u;
break;
}
case OctagonKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <=
((long)kernel->x + (long)(kernel->x/2)) )
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case DiskKernel:
{
ssize_t
limit = (ssize_t)(args->rho*args->rho);
if (args->rho < 0.4) /* default radius approx 4.3 */
kernel->width = kernel->height = 9L, limit = 18L;
else
kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ((u*u+v*v) <= limit)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case PlusKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
case CrossKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
/*
HitAndMiss Kernels
*/
case RingKernel:
case PeaksKernel:
{
ssize_t
limit1,
limit2,
scale;
if (args->rho < args->sigma)
{
kernel->width = ((size_t)args->sigma)*2+1;
limit1 = (ssize_t)(args->rho*args->rho);
limit2 = (ssize_t)(args->sigma*args->sigma);
}
else
{
kernel->width = ((size_t)args->rho)*2+1;
limit1 = (ssize_t)(args->sigma*args->sigma);
limit2 = (ssize_t)(args->rho*args->rho);
}
if ( limit2 <= 0 )
kernel->width = 7L, limit1 = 7L, limit2 = 11L;
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ ssize_t radius=u*u+v*v;
if (limit1 < radius && radius <= limit2)
kernel->positive_range += kernel->values[i] = (double) scale;
else
kernel->values[i] = nan;
}
kernel->minimum = kernel->maximum = (double) scale;
if ( type == PeaksKernel ) {
/* set the central point in the middle */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
kernel->maximum = 1.0;
}
break;
}
case EdgesKernel:
{
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
break;
}
case CornersKernel:
{
kernel=AcquireKernelInfo("ThinSE:87",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
break;
}
case DiagonalsKernel:
{
switch ( (int) args->rho ) {
case 0:
default:
{ KernelInfo
*new_kernel;
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
ExpandMirrorKernelInfo(kernel);
return(kernel);
}
case 1:
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
break;
case 2:
kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineEndsKernel:
{ /* Kernels for finding the end of thin lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all end of lines */
return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>",exception));
case 1:
/* kernel for 4-connected line ends - no rotation */
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
break;
case 2:
/* kernel to add for 8-connected lines - no rotation */
kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
break;
case 3:
/* kernel to add for orthogonal line ends - does not find corners */
kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
break;
case 4:
/* traditional line end - fails on last T end */
kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineJunctionsKernel:
{ /* kernels for finding the junctions of multiple lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all line junctions */
return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>",exception));
case 1:
/* Y Junction */
kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
break;
case 2:
/* Diagonal T Junctions */
kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
break;
case 3:
/* Orthogonal T Junctions */
kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
break;
case 4:
/* Diagonal X Junctions */
kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
break;
case 5:
/* Orthogonal X Junctions - minimal diamond kernel */
kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case RidgesKernel:
{ /* Ridges - Ridge finding kernels */
KernelInfo
*new_kernel;
switch ( (int) args->rho ) {
case 1:
default:
kernel=ParseKernelArray("3x1:0,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
break;
case 2:
kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
/* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
/* Unfortunatally we can not yet rotate a non-square kernel */
/* But then we can't flip a non-symetrical kernel either */
new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
break;
}
break;
}
case ConvexHullKernel:
{
KernelInfo
*new_kernel;
/* first set of 8 kernels */
kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0);
/* append the mirror versions too - no flip function yet */
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
ExpandRotateKernelInfo(new_kernel, 90.0);
LastKernelInfo(kernel)->next = new_kernel;
break;
}
case SkeletonKernel:
{
switch ( (int) args->rho ) {
case 1:
default:
/* Traditional Skeleton...
** A cyclically rotated single kernel
*/
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
break;
case 2:
/* HIPR Variation of the cyclic skeleton
** Corners of the traditional method made more forgiving,
** but the retain the same cyclic order.
*/
kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
if (kernel->next == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
kernel->type = type;
kernel->next->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
break;
case 3:
/* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
*/
kernel=AcquireKernelInfo("ThinSE:41; ThinSE:42; ThinSE:43",
exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->next->type = type;
kernel->next->next->type = type;
ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
break;
}
break;
}
case ThinSEKernel:
{ /* Special kernels for general thinning, while preserving connections
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
** And
** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
**
** Note kernels do not specify the origin pixel, allowing them
** to be used for both thickening and thinning operations.
*/
switch ( (int) args->rho ) {
/* SE for 4-connected thinning */
case 41: /* SE_4_1 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
break;
case 42: /* SE_4_2 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
break;
case 43: /* SE_4_3 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
break;
case 44: /* SE_4_4 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
break;
case 45: /* SE_4_5 */
kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
break;
case 46: /* SE_4_6 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
break;
case 47: /* SE_4_7 */
kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
break;
case 48: /* SE_4_8 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
break;
case 49: /* SE_4_9 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
break;
/* SE for 8-connected thinning - negatives of the above */
case 81: /* SE_8_0 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
break;
case 82: /* SE_8_2 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
break;
case 83: /* SE_8_3 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
break;
case 84: /* SE_8_4 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
break;
case 85: /* SE_8_5 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
break;
case 86: /* SE_8_6 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
break;
case 87: /* SE_8_7 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
break;
case 88: /* SE_8_8 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
break;
case 89: /* SE_8_9 */
kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
break;
/* Special combined SE kernels */
case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
break;
case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
break;
case 481: /* SE_48_1 - General Connected Corner Kernel */
kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
break;
default:
case 482: /* SE_48_2 - General Edge Kernel */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
/*
Distance Measuring Kernels
*/
case ChebyshevKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
kernel->maximum = kernel->values[0];
break;
}
case ManhattanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*(labs((long) u)+labs((long) v)) );
kernel->maximum = kernel->values[0];
break;
}
case OctagonalKernel:
{
if (args->rho < 2.0)
kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{
double
r1 = MagickMax(fabs((double)u),fabs((double)v)),
r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
kernel->positive_range += kernel->values[i] =
args->sigma*MagickMax(r1,r2);
}
kernel->maximum = kernel->values[0];
break;
}
case EuclideanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*sqrt((double)(u*u+v*v)) );
kernel->maximum = kernel->values[0];
break;
}
default:
{
/* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = UndefinedKernel;
break;
}
break;
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that it
% can be modified without affecting the original.  The cloned kernel should
% be destroyed using DestroyKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  KernelInfo
    *clone;

  register ssize_t
    i;

  assert(kernel != (KernelInfo *) NULL);
  clone=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (clone == (KernelInfo *) NULL)
    return(clone);
  /* shallow copy of every scalar field of the structure */
  *clone=(*kernel);
  /* the value array must be deep-copied so the clone owns its own data */
  clone->values=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel->width,kernel->height*sizeof(*kernel->values)));
  if (clone->values == (MagickRealType *) NULL)
    return(DestroyKernelInfo(clone));
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    clone->values[i]=kernel->values[i];
  /* recursively clone the remainder of the kernel list */
  if (kernel->next != (KernelInfo *) NULL)
    {
      clone->next=CloneKernelInfo(kernel->next);
      if (clone->next == (KernelInfo *) NULL)
        return(DestroyKernelInfo(clone));
    }
  return(clone);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyKernelInfo() frees the memory used by a Convolution/Morphology
% kernel.
%
% The format of the DestroyKernelInfo method is:
%
% KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
  assert(kernel != (KernelInfo *) NULL);
  /* walk the kernel list, releasing each value array and node in turn */
  while (kernel != (KernelInfo *) NULL)
    {
      KernelInfo
        *next;

      next=kernel->next;
      kernel->values=(MagickRealType *) RelinquishAlignedMemory(
        kernel->values);
      (void) RelinquishMagickMemory(kernel);
      kernel=next;
    }
  return((KernelInfo *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
% sequence of 90-degree rotated kernels, but providing a reflected 180
% rotation before the -/+ 90-degree rotations.
%
% This special rotation order produces a better, more symmetrical thinning of
% objects.
%
% The format of the ExpandMirrorKernelInfo method is:
%
% void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
#if 0
/*
  FlopKernelInfo() -- disabled (dead) code: reverses each row of the kernel
  in place (a horizontal 'flop') and mirrors the kernel's x origin.
  NOTE(review): the final statement references an undeclared variable
  'angle', so this function would not compile if the #if 0 guard were
  removed; it needs repair before being re-enabled.
*/
static void FlopKernelInfo(KernelInfo *kernel)
{ /* Do a Flop by reversing each row. */
  size_t
    y;
  register ssize_t
    x,r;
  register double
    *k,t;
  /* Swap value x with its mirror r within every row. */
  for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
    for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
      t=k[x], k[x]=k[r], k[r]=t;
  kernel->x = kernel->width - kernel->x - 1;
  /* NOTE(review): 'angle' is not declared anywhere in this function */
  angle = fmod(angle+180.0, 360.0);
}
#endif
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
  /*
    Expand a single kernel into a 4-way sequence by appending three rotated
    clones of the running tail: a 180-degree flip, then a 90-degree
    transpose, then another 180-degree flop.  If any clone fails the list
    is left shorter but still valid, and we return quietly.
  */
  static const double
    angles[3] = { 180.0, 90.0, 180.0 };  /* flip, transpose, flop */

  KernelInfo
    *rotated,
    *tail;

  ssize_t
    step;

  tail=kernel;
  for (step=0; step < 3; step++)
  {
    rotated=CloneKernelInfo(tail);
    if (rotated == (KernelInfo *) NULL)
      return;
    RotateKernelInfo(rotated,angles[step]);
    LastKernelInfo(tail)->next=rotated;
    tail=rotated;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45 degree rotations only work for 3x3 kernels.
% While 90 degree rotations only work for linear and square kernels
%
% The format of the ExpandRotateKernelInfo method is:
%
% void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
  const KernelInfo *kernel2)
{
  register size_t
    i;

  /*
    Two kernels are "the same" when their geometry (size and origin) and
    every individual value agree, treating NaN ('don't care') entries as
    matching only other NaN entries.
  */
  if ((kernel1->width != kernel2->width) ||
      (kernel1->height != kernel2->height) ||
      (kernel1->x != kernel2->x) ||
      (kernel1->y != kernel2->y))
    return(MagickFalse);
  for (i=0; i < (kernel1->width*kernel1->height); i++)
  {
    /* NaN entries must pair up: one NaN against a number is a mismatch. */
    if (IsNaN(kernel1->values[i]) != IsNaN(kernel2->values[i]))
      return(MagickFalse);
    /* Numeric entries must agree to within MagickEpsilon. */
    if (fabs(kernel1->values[i]-kernel2->values[i]) >= MagickEpsilon)
      return(MagickFalse);
  }
  return(MagickTrue);
}
static void ExpandRotateKernelInfo(KernelInfo *kernel,const double angle)
{
  KernelInfo
    *rotated,
    *tail;

  /*
    Repeatedly clone the tail of the list and rotate the clone by 'angle',
    appending each result, until the rotation cycles back to the original
    kernel.  The final (repeated) clone is discarded.  Stops quietly if a
    clone ever fails, leaving a shorter but valid list.
  */
  tail=kernel;
  for ( ; ; )
  {
    rotated=CloneKernelInfo(tail);
    if (rotated == (KernelInfo *) NULL)
      return;
    RotateKernelInfo(rotated,angle);
    if (SameKernelInfo(kernel,rotated) != MagickFalse)
      break;
    LastKernelInfo(tail)->next=rotated;
    tail=rotated;
  }
  rotated=DestroyKernelInfo(rotated);  /* rotation repeated - junk it */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a l c K e r n e l M e t a D a t a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CalcKernelMetaData() recalculates the KernelInfo meta-data of this kernel
% only, using the kernel values. This should only be used if it is not
% possible to calculate that meta-data in some easier way.
%
% It is important that the meta-data is correct before ScaleKernelInfo() is
% used to perform kernel normalization.
%
% The format of the CalcKernelMetaData method is:
%
% void CalcKernelMetaData(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% WARNING: Minimum and Maximum values are assumed to include zero, even if
% zero is not part of the kernel (as in Gaussian Derived kernels). This
% however is not true for flat-shaped morphological kernels.
%
% WARNING: Only the specific kernel pointed to is modified, not a list of
% multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module. This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  register size_t
    i;

  double
    value;

  /*
    Recompute min/max and the positive/negative range sums of this single
    kernel directly from its values.  Values within MagickEpsilon of zero
    are first snapped to exactly zero.  Note: minimum/maximum start at 0.0,
    so zero is implicitly included in the min/max range.
  */
  kernel->minimum=0.0;
  kernel->maximum=0.0;
  kernel->negative_range=0.0;
  kernel->positive_range=0.0;
  for (i=0; i < (kernel->width*kernel->height); i++)
  {
    if (fabs(kernel->values[i]) < MagickEpsilon)
      kernel->values[i]=0.0;
    value=kernel->values[i];
    if (value < 0)
      kernel->negative_range+=value;
    else
      kernel->positive_range+=value;
    Minimize(kernel->minimum,value);
    Maximize(kernel->maximum,value);
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y A p p l y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels. This is the method that should be called by
% other 'operators' that internally use morphology operations as part of
% their processing.
%
% It is basically equivalent to MorphologyImage() (see below) but without
% any user controls. This allows internal programs to use this method to
% perform a specific task without possible interference by any API user
% supplied settings.
%
% It is MorphologyImage() task to extract any such user controls, and
% pass them to this function for processing.
%
% More specifically all given kernels should already be scaled, normalised,
% and blended appropriately before being passed to this routine. The
% appropriate bias, and compose (typically 'UndefinedComposeOp') given.
%
% The format of the MorphologyApply method is:
%
% Image *MorphologyApply(const Image *image,MorphologyMethod method,
% const ssize_t iterations,const KernelInfo *kernel,
% const CompositeMethod compose,const double bias,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the source image
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
%
% o compose: How to handle or merge multi-kernel results.
% If 'UndefinedCompositeOp' use default for the Morphology method.
% If 'NoCompositeOp' force image to be re-iterated by each kernel.
% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MorphologyPrimitive() applies one low-level morphology primitive
  (convolve, erode, dilate, hit-and-miss, ...) of the given kernel to
  'image', writing into 'morphology_image'.  Returns the count of pixels
  changed; see the notes at the two return statements about the failure
  value.
*/
static ssize_t MorphologyPrimitive(const Image *image,Image *morphology_image,
  const MorphologyMethod method,const KernelInfo *kernel,const double bias,
  ExceptionInfo *exception)
{
#define MorphologyTag "Morphology/Image"

  CacheView
    *image_view,
    *morphology_view;

  OffsetInfo
    offset;

  register ssize_t
    j,
    y;

  size_t
    *changes,  /* one changed-pixel counter per OpenMP thread */
    changed,
    width;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(morphology_image != (Image *) NULL);
  assert(morphology_image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(morphology_image,exception);
  width=image->columns+kernel->width-1;
  offset.x=0;
  offset.y=0;
  switch (method)
  {
    case ConvolveMorphology:
    case DilateMorphology:
    case DilateIntensityMorphology:
    case IterativeDistanceMorphology:
    {
      /*
        Kernel needs to be used with reflection about origin.
      */
      offset.x=(ssize_t) kernel->width-kernel->x-1;
      offset.y=(ssize_t) kernel->height-kernel->y-1;
      break;
    }
    case ErodeMorphology:
    case ErodeIntensityMorphology:
    case HitAndMissMorphology:
    case ThinningMorphology:
    case ThickenMorphology:
    {
      offset.x=kernel->x;
      offset.y=kernel->y;
      break;
    }
    default:
    {
      assert("Not a Primitive Morphology Method" != (char *) NULL);
      break;
    }
  }
  changed=0;
  changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(),
    sizeof(*changes));
  if (changes == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
    changes[j]=0;
  if ((method == ConvolveMorphology) && (kernel->width == 1))
    {
      register ssize_t
        x;

      /*
        Special handling (for speed) of vertical (blur) kernels.  This
        performs its handling in columns rather than in rows.  This is only
        done for convolve as it is the only method that generates very large
        1-D vertical kernels (such as a 'BlurKernel')
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,morphology_image,image->columns,1)
#endif
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        const int
          id = GetOpenMPThreadId();

        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        register ssize_t
          r;

        ssize_t
          center;

        if (status == MagickFalse)
          continue;
        /* One full column plus the kernel overhang above and below. */
        p=GetCacheViewVirtualPixels(image_view,x,-offset.y,1,image->rows+
          kernel->height-1,exception);
        q=GetCacheViewAuthenticPixels(morphology_view,x,0,1,
          morphology_image->rows,exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        center=(ssize_t) GetPixelChannels(image)*offset.y;
        for (r=0; r < (ssize_t) image->rows; r++)
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            double
              alpha,
              gamma,
              pixel;

            PixelChannel
              channel;

            PixelTrait
              morphology_traits,
              traits;

            register const MagickRealType
              *magick_restrict k;

            register const Quantum
              *magick_restrict pixels;

            register ssize_t
              v;

            size_t
              count;

            channel=GetPixelChannelChannel(image,i);
            traits=GetPixelChannelTraits(image,channel);
            morphology_traits=GetPixelChannelTraits(morphology_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (morphology_traits == UndefinedPixelTrait))
              continue;
            if ((traits & CopyPixelTrait) != 0)
              {
                SetPixelChannel(morphology_image,channel,p[center+i],q);
                continue;
              }
            /* Walk the 1-D kernel in reverse (reflected) order. */
            k=(&kernel->values[kernel->height-1]);
            pixels=p;
            pixel=bias;
            gamma=0.0;
            count=0;
            if (((image->alpha_trait & BlendPixelTrait) == 0) ||
                ((morphology_traits & BlendPixelTrait) == 0))
              for (v=0; v < (ssize_t) kernel->height; v++)
              {
                if (!IsNaN(*k))
                  {
                    pixel+=(*k)*pixels[i];
                    gamma+=(*k);
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
            else
              for (v=0; v < (ssize_t) kernel->height; v++)
              {
                if (!IsNaN(*k))
                  {
                    alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                    pixel+=alpha*(*k)*pixels[i];
                    gamma+=alpha*(*k);
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
            if (fabs(pixel-p[center+i]) > MagickEpsilon)
              changes[id]++;
            gamma=PerceptibleReciprocal(gamma);
            if (count != 0)
              gamma*=(double) kernel->height/count;
            SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*
              pixel),q);
          }
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(morphology_image);
        }
        if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      morphology_image->type=image->type;
      morphology_view=DestroyCacheView(morphology_view);
      image_view=DestroyCacheView(image_view);
      for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
        changed+=changes[j];
      changes=(size_t *) RelinquishMagickMemory(changes);
      /* NOTE(review): this exit returns 0 on failure while the main exit
         below returns -1; presumably intentional upstream -- confirm. */
      return(status ? (ssize_t) changed : 0);
    }
  /*
    Normal handling of horizontal or rectangular kernels (row by row).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,morphology_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      center;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,
      kernel->height,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,morphology_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Offset (in Quantum units) of the origin pixel within the p window. */
    center=(ssize_t) (GetPixelChannels(image)*width*offset.y+
      GetPixelChannels(image)*offset.x);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          intensity,
          maximum,
          minimum,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          morphology_traits,
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        size_t
          count;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        morphology_traits=GetPixelChannelTraits(morphology_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (morphology_traits == UndefinedPixelTrait))
          continue;
        if ((traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(morphology_image,channel,p[center+i],q);
            continue;
          }
        pixels=p;
        maximum=0.0;
        minimum=(double) QuantumRange;
        /* Seed the accumulator appropriately for each method. */
        switch (method)
        {
          case ConvolveMorphology:
          {
            pixel=bias;
            break;
          }
          case DilateMorphology:
          case ErodeIntensityMorphology:
          {
            pixel=0.0;
            break;
          }
          case HitAndMissMorphology:
          case ErodeMorphology:
          {
            pixel=QuantumRange;
            break;
          }
          default:
          {
            pixel=(double) p[center+i];
            break;
          }
        }
        count=0;
        gamma=1.0;
        switch (method)
        {
          case ConvolveMorphology:
          {
            /*
              Weighted Average of pixels using reflected kernel.

              For correct working of this operation for asymmetrical kernels,
              the kernel needs to be applied in its reflected form.  That is
              its values need to be reversed.

              Correlation is actually the same as this but without reflecting
              the kernel, and thus 'lower-level' than Convolution.  However as
              Convolution is the more common method used, and it does not
              really cost us much in terms of processing to use a reflected
              kernel, so it is Convolution that is implemented.

              Correlation will have its kernel reflected before calling this
              function to do a Convolve.

              For more details of Correlation vs Convolution see
                http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            if (((image->alpha_trait & BlendPixelTrait) == 0) ||
                ((morphology_traits & BlendPixelTrait) == 0))
              {
                /*
                  No alpha blending.
                */
                for (v=0; v < (ssize_t) kernel->height; v++)
                {
                  for (u=0; u < (ssize_t) kernel->width; u++)
                  {
                    if (!IsNaN(*k))
                      {
                        pixel+=(*k)*pixels[i];
                        count++;
                      }
                    k--;
                    pixels+=GetPixelChannels(image);
                  }
                  pixels+=(image->columns-1)*GetPixelChannels(image);
                }
                break;
              }
            /*
              Alpha blending.
            */
            gamma=0.0;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                    pixel+=alpha*(*k)*pixels[i];
                    gamma+=alpha*(*k);
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case ErodeMorphology:
          {
            /*
              Minimum value within kernel neighbourhood.

              The kernel is not reflected for this operation.  In normal
              Greyscale Morphology, the kernel value should be added
              to the real value, this is currently not done, due to the
              nature of the boolean kernels being used.
            */
            k=kernel->values;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k >= 0.5))
                  {
                    if ((double) pixels[i] < pixel)
                      pixel=(double) pixels[i];
                  }
                k++;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case DilateMorphology:
          {
            /*
              Maximum value within kernel neighbourhood.

              For correct working of this operation for asymmetrical kernels,
              the kernel needs to be applied in its reflected form.  That is
              its values need to be reversed.

              In normal Greyscale Morphology, the kernel value should be
              added to the real value, this is currently not done, due to the
              nature of the boolean kernels being used.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k > 0.5))
                  {
                    if ((double) pixels[i] > pixel)
                      pixel=(double) pixels[i];
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case HitAndMissMorphology:
          case ThinningMorphology:
          case ThickenMorphology:
          {
            /*
              Minimum of foreground pixel minus maximum of background pixels.

              The kernel is not reflected for this operation, and consists
              of both foreground and background pixel neighbourhoods, 0.0 for
              background, and 1.0 for foreground with either Nan or 0.5 values
              for don't care.

              This never produces a meaningless negative result.  Such results
              cause Thinning/Thicken to not work correctly when used against a
              greyscale image.
            */
            k=kernel->values;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if (*k > 0.7)
                      {
                        /* foreground entry: track the minimum */
                        if ((double) pixels[i] < minimum)
                          minimum=(double) pixels[i];
                      }
                    else
                      if (*k < 0.3)
                        {
                          /* background entry: track the maximum */
                          if ((double) pixels[i] > maximum)
                            maximum=(double) pixels[i];
                        }
                    count++;
                  }
                k++;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            minimum-=maximum;
            if (minimum < 0.0)
              minimum=0.0;
            pixel=minimum;
            if (method == ThinningMorphology)
              pixel=(double) p[center+i]-pixel;
            else
              if (method == ThickenMorphology)
                /* NOTE(review): '+=' yields p[center+i]+2*pixel; presumably
                   'pixel=(double) p[center+i]+pixel' was intended -- confirm
                   against upstream before changing. */
                pixel+=(double) p[center+i]+pixel;
            break;
          }
          case ErodeIntensityMorphology:
          {
            /*
              Select pixel with minimum intensity within kernel neighbourhood.

              The kernel is not reflected for this operation.
            */
            k=kernel->values;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k >= 0.5))
                  {
                    intensity=(double) GetPixelIntensity(image,pixels);
                    if (intensity < minimum)
                      {
                        pixel=(double) pixels[i];
                        minimum=intensity;
                      }
                    count++;
                  }
                k++;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case DilateIntensityMorphology:
          {
            /*
              Select pixel with maximum intensity within kernel neighbourhood.

              The kernel is not reflected for this operation.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k >= 0.5))
                  {
                    intensity=(double) GetPixelIntensity(image,pixels);
                    if (intensity > maximum)
                      {
                        pixel=(double) pixels[i];
                        maximum=intensity;
                      }
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case IterativeDistanceMorphology:
          {
            /*
              Compute the iterative distance from black edge of a white image
              shape.  Essentially white values are decreased to the smallest
              'distance from edge' it can find.

              It works by adding kernel values to the neighbourhood, and
              selecting the minimum value found.  The kernel is rotated before
              use, so kernel distances match resulting distances, when a user
              provided asymmetric kernel is applied.

              This code is nearly identical to True GrayScale Morphology but
              not quite.

              GreyDilate: Kernel values added, maximum value found.  Kernel is
              rotated before use.

              GrayErode: Kernel values subtracted and minimum value found.  No
              kernel rotation used.

              Note that the Iterative Distance method is essentially a
              GrayErode, but with negative kernel values, and kernel rotation
              applied.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case UndefinedMorphology:
          default:
            break;
        }
        if (fabs(pixel-p[center+i]) > MagickEpsilon)
          changes[id]++;
        gamma=PerceptibleReciprocal(gamma);
        if (count != 0)
          gamma*=(double) kernel->height*kernel->width/count;
        SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(morphology_image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
    changed+=changes[j];
  changes=(size_t *) RelinquishMagickMemory(changes);
  return(status ? (ssize_t) changed : -1);
}
/*
  This is almost identical to the MorphologyPrimitive() function above, but
  applies the primitive directly to the actual image using two passes, once in
  each direction, with the results of the previous (and current) row being
  re-used.

  That is after each row is 'Sync'ed' into the image, the next row makes use of
  those values as part of the calculation of the next row.  It repeats, but
  going in the opposite (bottom-up) direction.

  Because of this 're-use of results' this function can not make use of
  multi-threaded, parallel processing.
*/
/*
  MorphologyPrimitiveDirect() applies a distance-style morphology method
  (Distance or Voronoi) directly to 'image' in place, using two sequential
  passes (top-down then bottom-up) so that each row re-uses the results of
  rows already processed.  Returns the number of pixels changed, or -1 on
  failure.  Must not be threaded: both cache views address the same image.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
  const MorphologyMethod method,const KernelInfo *kernel,
  ExceptionInfo *exception)
{
  CacheView
    *morphology_view,
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;

  size_t
    width,
    changed;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=MagickTrue;
  changed=0;
  progress=0;
  switch(method)
  {
    case DistanceMorphology:
    case VoronoiMorphology:
    {
      /*
        Kernel reflected about origin.
      */
      offset.x=(ssize_t) kernel->width-kernel->x-1;
      offset.y=(ssize_t) kernel->height-kernel->y-1;
      break;
    }
    default:
    {
      offset.x=kernel->x;
      offset.y=kernel->y;
      break;
    }
  }
  /*
    Two views into same image, do not thread.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(image,exception);
  width=image->columns+kernel->width-1;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Read virtual pixels, and authentic pixels, from the same image!  We read
      using virtual to get virtual pixel handling, but write back into the
      same image.

      Only top half of kernel is processed as we do a single pass downward
      through the image iterating the distance function as we go.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,(size_t)
      offset.y+1,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        pixels=p;
        pixel=(double) QuantumRange;
        switch (method)
        {
          case DistanceMorphology:
          {
            /* Rows above the current row (inclusive of kernel centre). */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v <= offset.y; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* Pixels to the left on the current row, already updated in q. */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q-offset.x*GetPixelChannels(image);
            for (u=0; u < offset.x; u++)
            {
              if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
              pixels+=GetPixelChannels(image);
            }
            break;
          }
          case VoronoiMorphology:
          {
            /* Rows strictly above the current row. */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < offset.y; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* Pixels to the left on the current row, already updated in q. */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q-offset.x*GetPixelChannels(image);
            for (u=0; u < offset.x; u++)
            {
              if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
              pixels+=GetPixelChannels(image);
            }
            break;
          }
          default:
            break;
        }
        if (fabs(pixel-q[i]) > MagickEpsilon)
          changed++;
        q[i]=ClampToQuantum(pixel);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MorphologyTag,progress,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  /*
    Do the reverse pass through the image.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(image,exception);
  for (y=(ssize_t) image->rows-1; y >= 0; y--)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Read virtual pixels, and authentic pixels, from the same image.  We read
      using virtual to get virtual pixel handling, but write back into the
      same image.

      Only the bottom half of the kernel is processed as we move up the
      image.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y,width,(size_t)
      kernel->y+1,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Start at the rightmost pixel and walk leftwards. */
    p+=(image->columns-1)*GetPixelChannels(image);
    q+=(image->columns-1)*GetPixelChannels(image);
    for (x=(ssize_t) image->columns-1; x >= 0; x--)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        pixels=p;
        pixel=(double) QuantumRange;
        switch (method)
        {
          case DistanceMorphology:
          {
            /* Rows at and below the current row. */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            for (v=offset.y; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* Pixels to the right on the current row, already updated. */
            k=(&kernel->values[kernel->width*kernel->y+kernel->x-1]);
            pixels=q;
            for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
            {
              pixels+=GetPixelChannels(image);
              if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
            }
            break;
          }
          case VoronoiMorphology:
          {
            /* Rows at and below the current row. */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            for (v=offset.y; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* Pixels to the right on the current row, already updated. */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q;
            for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
            {
              pixels+=GetPixelChannels(image);
              if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
            }
            break;
          }
          default:
            break;
        }
        if (fabs(pixel-q[i]) > MagickEpsilon)
          changed++;
        q[i]=ClampToQuantum(pixel);
      }
      p-=GetPixelChannels(image);
      q-=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MorphologyTag,progress,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  return(status ? (ssize_t) changed : -1);
}
/*
Apply a Morphology by calling one of the above low level primitive
application functions. This function handles any iteration loops,
composition or re-iteration of results, and compound morphology methods that
is based on multiple low-level (staged) morphology methods.
Basically this provides the complex glue between the requested morphology
method and raw low-level implementation (above).
*/
/*
  MorphologyApply() is the master driver for morphology: it expands the
  requested (possibly compound) method into a sequence of low-level
  primitives, iterates each primitive/kernel the requested number of times,
  and handles multi-kernel lists by either re-iterating or composing the
  per-kernel results.

  Parameters:
    image      - source image (never modified; always cloned before writes)
    method     - morphology method (primitive or compound)
    iterations - kernel iteration count; 0 = no-op (returns NULL),
                 negative = iterate until no pixel changes (bounded by the
                 larger image dimension)
    kernel     - kernel, or head of a multi-kernel list
    compose    - user compose override for multi-kernel results
                 (UndefinedCompositeOp = use the method's default)
    bias       - output bias for convolution-style primitives
    exception  - error/warning sink

  Returns a newly allocated image, or NULL on error (or iterations == 0).
  Cleanup is centralized via the error_cleanup/exit_cleanup labels.
*/
MagickPrivate Image *MorphologyApply(const Image *image,
  const MorphologyMethod method, const ssize_t iterations,
  const KernelInfo *kernel, const CompositeOperator compose,const double bias,
  ExceptionInfo *exception)
{
  CompositeOperator
    curr_compose;

  Image
    *curr_image,    /* Image we are working with or iterating */
    *work_image,    /* secondary image for primitive iteration */
    *save_image,    /* saved image - for 'edge' method only */
    *rslt_image;    /* resultant image - after multi-kernel handling */

  KernelInfo
    *reflected_kernel, /* A reflected copy of the kernel (if needed) */
    *norm_kernel,      /* the current normal un-reflected kernel */
    *rflt_kernel,      /* the current reflected kernel (if needed) */
    *this_kernel;      /* the kernel being applied this stage */

  MorphologyMethod
    primitive;      /* the current morphology primitive being applied */

  CompositeOperator
    rslt_compose;   /* multi-kernel compose method for results to use */

  MagickBooleanType
    special,        /* do we use a direct modify function? */
    verbose;        /* verbose output of results */

  size_t
    method_loop,    /* Loop 1: number of compound method iterations (norm 1) */
    method_limit,   /*         maximum number of compound method iterations */
    kernel_number,  /* Loop 2: the kernel number being applied */
    stage_loop,     /* Loop 3: primitive loop for compound morphology */
    stage_limit,    /*         how many primitives are in this compound */
    kernel_loop,    /* Loop 4: iterate the kernel over image */
    kernel_limit,   /*         number of times to iterate kernel */
    count,          /* total count of primitive steps applied */
    kernel_changed, /* total count of changed using iterated kernel */
    method_changed; /* total count of changed over method iteration */

  ssize_t
    changed;        /* number pixels changed by last primitive operation */

  char
    v_info[MagickPathExtent];  /* verbose-mode prefix for compound stages */

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  count = 0;  /* number of low-level morphology primitives performed */
  if ( iterations == 0 )
    return((Image *) NULL);  /* null operation - nothing to do! */

  kernel_limit = (size_t) iterations;
  if ( iterations < 0 )  /* negative iterations = infinite (well, almost) */
    kernel_limit = image->columns>image->rows ? image->columns : image->rows;

  verbose = IsStringTrue(GetImageArtifact(image,"debug"));

  /* initialise for cleanup */
  curr_image = (Image *) image;
  curr_compose = image->compose;
  (void) curr_compose;  /* currently unused; kept for future compose handling */
  work_image = save_image = rslt_image = (Image *) NULL;
  reflected_kernel = (KernelInfo *) NULL;

  /* Initialize specific methods
   * + which loop should use the given iterations
   * + how many primitives make up the compound morphology
   * + multi-kernel compose method to use (by default)
   */
  method_limit = 1;        /* just do method once, unless otherwise set */
  stage_limit = 1;         /* assume method is not a compound */
  special = MagickFalse;   /* assume it is NOT a direct modify primitive */
  rslt_compose = compose;  /* and we are composing multi-kernels as given */
  switch( method ) {
    case SmoothMorphology:  /* 4 primitive compound morphology */
      stage_limit = 4;
      break;
    case OpenMorphology:    /* 2 primitive compound morphology */
    case OpenIntensityMorphology:
    case TopHatMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case EdgeMorphology:
      stage_limit = 2;
      break;
    case HitAndMissMorphology:
      rslt_compose = LightenCompositeOp;  /* Union of multi-kernel results */
      /* FALL THRU */
    case ThinningMorphology:
    case ThickenMorphology:
      method_limit = kernel_limit;  /* iterate the whole method */
      kernel_limit = 1;             /* do not do kernel iteration */
      break;
    case DistanceMorphology:
    case VoronoiMorphology:
      special = MagickTrue;  /* use special direct primitive */
      break;
    default:
      break;
  }

  /* Apply special methods with special requirements
  ** For example, single run only, or post-processing requirements
  */
  if ( special != MagickFalse )
    {
      rslt_image=CloneImage(image,0,0,MagickTrue,exception);
      if (rslt_image == (Image *) NULL)
        goto error_cleanup;
      if (SetImageStorageClass(rslt_image,DirectClass,exception) == MagickFalse)
        goto error_cleanup;

      /* Distance/Voronoi modify the clone in place (no work image needed) */
      changed=MorphologyPrimitiveDirect(rslt_image,method,kernel,exception);

      if (verbose != MagickFalse)
        (void) (void) FormatLocaleFile(stderr,
          "%s:%.20g.%.20g #%.20g => Changed %.20g\n",
          CommandOptionToMnemonic(MagickMorphologyOptions, method),
          1.0,0.0,1.0, (double) changed);

      if ( changed < 0 )  /* negative = primitive reported an error */
        goto error_cleanup;

      if ( method == VoronoiMorphology ) {
        /* Preserve the alpha channel of input image - but turned it off */
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
        (void) CompositeImage(rslt_image,image,CopyAlphaCompositeOp,
          MagickTrue,0,0,exception);
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
      }
      goto exit_cleanup;
    }

  /* Handle user (caller) specified multi-kernel composition method */
  if ( compose != UndefinedCompositeOp )
    rslt_compose = compose;  /* override default composition for method */
  if ( rslt_compose == UndefinedCompositeOp )
    rslt_compose = NoCompositeOp;  /* still not defined! Then re-iterate */

  /* Some methods require a reflected kernel to use with primitives.
   * Create the reflected kernel for those methods. */
  switch ( method ) {
    case CorrelateMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case SmoothMorphology:
      reflected_kernel = CloneKernelInfo(kernel);
      if (reflected_kernel == (KernelInfo *) NULL)
        goto error_cleanup;
      RotateKernelInfo(reflected_kernel,180);
      break;
    default:
      break;
  }

  /* Loops around more primitive morphology methods
  ** erode, dilate, open, close, smooth, edge, etc...
  */
  /* Loop 1: iterate the compound method */
  method_loop = 0;
  method_changed = 1;
  while ( method_loop < method_limit && method_changed > 0 ) {
    method_loop++;
    method_changed = 0;

    /* Loop 2: iterate over each kernel in a multi-kernel list */
    norm_kernel = (KernelInfo *) kernel;
    this_kernel = (KernelInfo *) kernel;
    rflt_kernel = reflected_kernel;

    kernel_number = 0;
    while ( norm_kernel != NULL ) {

      /* Loop 3: Compound Morphology Staging - Select Primitive to apply */
      stage_loop = 0;  /* the compound morphology stage number */
      while ( stage_loop < stage_limit ) {
        stage_loop++;  /* The stage of the compound morphology */

        /* Select primitive morphology for this stage of compound method */
        this_kernel = norm_kernel;  /* default use unreflected kernel */
        primitive = method;         /* Assume method is a primitive */
        switch( method ) {
          case ErodeMorphology:    /* just erode */
          case EdgeInMorphology:   /* erode and image difference */
            primitive = ErodeMorphology;
            break;
          case DilateMorphology:   /* just dilate */
          case EdgeOutMorphology:  /* dilate and image difference */
            primitive = DilateMorphology;
            break;
          case OpenMorphology:     /* erode then dilate */
          case TopHatMorphology:   /* open and image difference */
            primitive = ErodeMorphology;
            if ( stage_loop == 2 )
              primitive = DilateMorphology;
            break;
          case OpenIntensityMorphology:
            primitive = ErodeIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = DilateIntensityMorphology;
            break;
          case CloseMorphology:      /* dilate, then erode */
          case BottomHatMorphology:  /* close and image difference */
            this_kernel = rflt_kernel;  /* use the reflected kernel */
            primitive = DilateMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeMorphology;
            break;
          case CloseIntensityMorphology:
            this_kernel = rflt_kernel;  /* use the reflected kernel */
            primitive = DilateIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeIntensityMorphology;
            break;
          case SmoothMorphology:  /* open, close */
            switch ( stage_loop ) {
              case 1:  /* start an open method, which starts with Erode */
                primitive = ErodeMorphology;
                break;
              case 2:  /* now Dilate the Erode */
                primitive = DilateMorphology;
                break;
              case 3:  /* Reflect kernel for a close */
                this_kernel = rflt_kernel;  /* use the reflected kernel */
                primitive = DilateMorphology;
                break;
              case 4:  /* Finish the Close */
                this_kernel = rflt_kernel;  /* use the reflected kernel */
                primitive = ErodeMorphology;
                break;
            }
            break;
          case EdgeMorphology:  /* dilate and erode difference */
            primitive = DilateMorphology;
            if ( stage_loop == 2 ) {
              save_image = curr_image;  /* save the image difference */
              curr_image = (Image *) image;
              primitive = ErodeMorphology;
            }
            break;
          case CorrelateMorphology:
            /* A Correlation is a Convolution with a reflected kernel.
            ** However a Convolution is a weighted sum using a reflected
            ** kernel. It may seem strange to convert a Correlation into a
            ** Convolution as the Correlation is the simpler method, but
            ** Convolution is much more commonly used, and it makes sense to
            ** implement it directly so as to avoid the need to duplicate the
            ** kernel when it is not required (which is typically the
            ** default).
            */
            this_kernel = rflt_kernel;  /* use the reflected kernel */
            primitive = ConvolveMorphology;
            break;
          default:
            break;
        }
        assert( this_kernel != (KernelInfo *) NULL );

        /* Extra information for debugging compound operations */
        if (verbose != MagickFalse) {
          if ( stage_limit > 1 )
            (void) FormatLocaleString(v_info,MagickPathExtent,"%s:%.20g.%.20g -> ",
              CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
              method_loop,(double) stage_loop);
          else if ( primitive != method )
            (void) FormatLocaleString(v_info, MagickPathExtent, "%s:%.20g -> ",
              CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
              method_loop);
          else
            v_info[0] = '\0';
        }

        /* Loop 4: Iterate the kernel with primitive */
        kernel_loop = 0;
        kernel_changed = 0;
        changed = 1;
        while ( kernel_loop < kernel_limit && changed > 0 ) {
          kernel_loop++;  /* the iteration of this kernel */

          /* Create a clone as the destination image, if not yet defined */
          if ( work_image == (Image *) NULL )
            {
              work_image=CloneImage(image,0,0,MagickTrue,exception);
              if (work_image == (Image *) NULL)
                goto error_cleanup;
              if (SetImageStorageClass(work_image,DirectClass,exception) == MagickFalse)
                goto error_cleanup;
            }

          /* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
          count++;
          changed = MorphologyPrimitive(curr_image, work_image, primitive,
            this_kernel, bias, exception);
          if (verbose != MagickFalse) {
            if ( kernel_loop > 1 )
              (void) FormatLocaleFile(stderr, "\n");  /* add end-of-line from previous */
            (void) (void) FormatLocaleFile(stderr,
              "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
              v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
              primitive),(this_kernel == rflt_kernel ) ? "*" : "",
              (double) (method_loop+kernel_loop-1),(double) kernel_number,
              (double) count,(double) changed);
          }
          if ( changed < 0 )  /* primitive reported an error */
            goto error_cleanup;
          kernel_changed += changed;
          method_changed += changed;

          /* prepare next loop */
          { Image *tmp = work_image;  /* swap images for iteration */
            work_image = curr_image;
            curr_image = tmp;
          }
          /* never let the read-only input 'image' become the scratch image */
          if ( work_image == image )
            work_image = (Image *) NULL;  /* replace input 'image' */

        } /* End Loop 4: Iterate the kernel with primitive */

        if (verbose != MagickFalse && kernel_changed != (size_t)changed)
          (void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed);
        if (verbose != MagickFalse && stage_loop < stage_limit)
          (void) FormatLocaleFile(stderr, "\n");  /* add end-of-line before looping */

#if 0
    (void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
    (void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image);
    (void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image);
    (void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image);
    (void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image);
#endif

      } /* End Loop 3: Primitive (staging) Loop for Compound Methods */

      /* Final Post-processing for some Compound Methods
      **
      ** The removal of any 'Sync' channel flag in the Image Composition
      ** below ensures the mathematical compose method is applied in a
      ** purely mathematical way, and only to the selected channels.
      ** Turn off SVG composition 'alpha blending'.
      */
      switch( method ) {
        case EdgeOutMorphology:
        case EdgeInMorphology:
        case TopHatMorphology:
        case BottomHatMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference with original image",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          break;
        case EdgeMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference of Dilate and Erode",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,save_image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          save_image = DestroyImage(save_image);  /* finished with save image */
          break;
        default:
          break;
      }

      /* multi-kernel handling: re-iterate, or compose results */
      if ( kernel->next == (KernelInfo *) NULL )
        rslt_image = curr_image;  /* just return the resulting image */
      else if ( rslt_compose == NoCompositeOp )
        { if (verbose != MagickFalse) {
            if ( this_kernel->next != (KernelInfo *) NULL )
              (void) FormatLocaleFile(stderr, " (re-iterate)");
            else
              (void) FormatLocaleFile(stderr, " (done)");
          }
          rslt_image = curr_image;  /* return result, and re-iterate */
        }
      else if ( rslt_image == (Image *) NULL)
        { if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (save for compose)");
          rslt_image = curr_image;
          curr_image = (Image *) image;  /* continue with original image */
        }
      else
        { /* Add the new 'current' result to the composition
          **
          ** The removal of any 'Sync' channel flag in the Image Composition
          ** below ensures the mathematical compose method is applied in a
          ** purely mathematical way, and only to the selected channels.
          ** IE: Turn off SVG composition 'alpha blending'.
          */
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (compose \"%s\")",
              CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
          (void) CompositeImage(rslt_image,curr_image,rslt_compose,MagickTrue,
            0,0,exception);
          curr_image = DestroyImage(curr_image);
          curr_image = (Image *) image;  /* continue with original image */
        }
      if (verbose != MagickFalse)
        (void) FormatLocaleFile(stderr, "\n");

      /* loop to the next kernel in a multi-kernel list */
      norm_kernel = norm_kernel->next;
      if ( rflt_kernel != (KernelInfo *) NULL )
        rflt_kernel = rflt_kernel->next;
      kernel_number++;
    } /* End Loop 2: Loop over each kernel */

  } /* End Loop 1: compound method iteration */

  goto exit_cleanup;

  /* Yes goto's are bad, but it makes cleanup a lot more efficient */
error_cleanup:
  /* on error, also discard the (partial) result image */
  if ( curr_image == rslt_image )
    curr_image = (Image *) NULL;
  if ( rslt_image != (Image *) NULL )
    rslt_image = DestroyImage(rslt_image);
exit_cleanup:
  /* never destroy the returned result or the caller's input image */
  if ( curr_image == rslt_image || curr_image == image )
    curr_image = (Image *) NULL;
  if ( curr_image != (Image *) NULL )
    curr_image = DestroyImage(curr_image);
  if ( work_image != (Image *) NULL )
    work_image = DestroyImage(work_image);
  if ( save_image != (Image *) NULL )
    save_image = DestroyImage(save_image);
  if ( reflected_kernel != (KernelInfo *) NULL )
    reflected_kernel = DestroyKernelInfo(reflected_kernel);
  return(rslt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyImage() applies a user supplied kernel to the image according to
% the given morphology method.
%
% This function applies any and all user defined settings before calling
% the above internal function MorphologyApply().
%
% User defined settings include...
% * Output Bias for Convolution and correlation ("-define convolve:bias=??")
% * Kernel Scale/normalize settings ("-define convolve:scale=??")
% This can also includes the addition of a scaled unity kernel.
% * Show Kernel being applied ("-define morphology:showKernel=1")
%
% Other operators that do not want user supplied options interfering,
% especially "convolve:bias" and "morphology:showKernel" should use
% MorphologyApply() directly.
%
% The format of the MorphologyImage method is:
%
% Image *MorphologyImage(const Image *image,MorphologyMethod method,
% const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o kernel: An array of double representing the morphology kernel.
% Warning: kernel may be normalized for the Convolve method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImage(const Image *image,
  const MorphologyMethod method,const ssize_t iterations,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  /*
    Public wrapper around MorphologyApply(): reads the user-level image
    artifacts ("convolve:bias", "convolve:scale", "morphology:showKernel",
    "morphology:compose"), adjusts the kernel and options accordingly, then
    delegates the real work.  Returns the morphologized image, or NULL on
    failure.  The caller's kernel is never modified: a clone is made if
    scaling is requested, and destroyed again before returning.
  */
  CompositeOperator
    compose_method;

  const char
    *option;

  double
    convolve_bias;

  Image
    *result;

  KernelInfo
    *local_kernel;

  local_kernel=(KernelInfo *) kernel;
  convolve_bias=0.0;
  compose_method=UndefinedCompositeOp;  /* use default for method */
  /* Apply Convolve/Correlate normalization and scaling factors BEFORE any
     ShowKernelInfo() output, so the user sees the scaled kernel. */
  if ((method == ConvolveMorphology) || (method == CorrelateMorphology))
    {
      /* Output bias, needed by the convolution primitives */
      option=GetImageArtifact(image,"convolve:bias");
      if (option != (const char *) NULL)
        {
          if (IsGeometry(option) == MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              OptionWarning,"InvalidSetting","'%s' '%s'","convolve:bias",
              option);
          else
            convolve_bias=StringToDoubleInterval(option,(double) QuantumRange+
              1.0);
        }
      /* Scale/normalize the kernel according to user wishes */
      option=GetImageArtifact(image,"convolve:scale");
      if (option != (const char *) NULL)
        {
          if (IsGeometry(option) == MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              OptionWarning,"InvalidSetting","'%s' '%s'","convolve:scale",
              option);
          else
            {
              /* clone before modifying - the caller's kernel is const */
              if (local_kernel == kernel)
                local_kernel=CloneKernelInfo(kernel);
              if (local_kernel == (KernelInfo *) NULL)
                return((Image *) NULL);
              ScaleGeometryKernelInfo(local_kernel,option);
            }
        }
    }
  /* Display the (possibly normalized) kernel via stderr on request */
  option=GetImageArtifact(image,"morphology:showKernel");
  if (IsStringTrue(option) != MagickFalse)
    ShowKernelInfo(local_kernel);
  /* Override the default handling of multi-kernel morphology results:
       'Undefined' = use the method's default,
       'None'      = re-iterate the previous result,
       otherwise   = merge resulting images with the given compose method. */
  option=GetImageArtifact(image,"morphology:compose");
  if (option != (const char *) NULL)
    {
      ssize_t
        parse;

      parse=ParseCommandOption(MagickComposeOptions,MagickFalse,option);
      if (parse < 0)
        (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
          "UnrecognizedComposeOperator","'%s' '%s'","morphology:compose",
          option);
      else
        compose_method=(CompositeOperator) parse;
    }
  /* Apply the morphology, then clean up any cloned kernel */
  result=MorphologyApply(image,method,iterations,local_kernel,compose_method,
    convolve_bias,exception);
  if (local_kernel != kernel)
    local_kernel=DestroyKernelInfo(local_kernel);
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateKernelInfo() rotates the kernel by the angle given.
%
% Currently it is restricted to 90 degree angles, of either 1D kernels
% or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
% It will ignore useless rotations for specific 'named' built-in kernels.
%
% The format of the RotateKernelInfo method is:
%
% void RotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
/*
  Rotate the given kernel (and every kernel after it in a multi-kernel
  list) in place by 'angle' degrees.  Only 45-degree rotations of 3x3
  kernels and 90/180-degree rotations of 1-D or square kernels are
  implemented; rotations that would have no effect on certain built-in
  kernel shapes are skipped entirely.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
  /* recurse: rotate the rest of the multi-kernel list first */
  if ( kernel->next != (KernelInfo *) NULL)
    RotateKernelInfo(kernel->next, angle);

  /* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical
  **
  ** TODO: expand beyond simple 90 degree rotates, flips and flops
  */

  /* Normalize the angle into the range [0, 360) */
  angle = fmod(angle, 360.0);
  if ( angle < 0 )
    angle += 360.0;

  if ( 337.5 < angle || angle <= 22.5 )
    return;  /* Near zero angle - no change! - At least not at this time */

  /* Handle special cases */
  switch (kernel->type) {
    /* These built-in kernels are cylindrical kernels, rotating is useless */
    case GaussianKernel:
    case DoGKernel:
    case LoGKernel:
    case DiskKernel:
    case PeaksKernel:
    case LaplacianKernel:
    case ChebyshevKernel:
    case ManhattanKernel:
    case EuclideanKernel:
      return;

    /* These may be rotatable at non-90 angles in the future */
    /* but simply rotating them in multiples of 90 degrees is useless */
    case SquareKernel:
    case DiamondKernel:
    case PlusKernel:
    case CrossKernel:
      return;

    /* These only allow a +/-90 degree rotation (by transpose) */
    /* A 180 degree rotation is useless */
    case BlurKernel:
      if ( 135.0 < angle && angle <= 225.0 )
        return;
      if ( 225.0 < angle && angle <= 315.0 )
        angle -= 180;
      break;

    default:
      break;
  }
  /* Attempt rotations by 45 degrees -- 3x3 kernels only */
  if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
    {
      if ( kernel->width == 3 && kernel->height == 3 )
        { /* Rotate a 3x3 square by 45 degree angle: cycle the 8 outer
             cells one position around the center cell (index 4) */
          double t = kernel->values[0];
          kernel->values[0] = kernel->values[3];
          kernel->values[3] = kernel->values[6];
          kernel->values[6] = kernel->values[7];
          kernel->values[7] = kernel->values[8];
          kernel->values[8] = kernel->values[5];
          kernel->values[5] = kernel->values[2];
          kernel->values[2] = kernel->values[1];
          kernel->values[1] = t;
          /* rotate non-centered origin */
          if ( kernel->x != 1 || kernel->y != 1 ) {
            ssize_t x,y;
            /* work with the origin offset relative to the center cell */
            x = (ssize_t) kernel->x-1;
            y = (ssize_t) kernel->y-1;
            if ( x == y ) x = 0;
            else if ( x == 0 ) x = -y;
            else if ( x == -y ) y = 0;
            else if ( y == 0 ) y = x;
            kernel->x = (ssize_t) x+1;
            kernel->y = (ssize_t) y+1;
          }
          angle = fmod(angle+315.0, 360.0);  /* angle reduced 45 degrees */
          kernel->angle = fmod(kernel->angle+45.0, 360.0);
        }
      else
        perror("Unable to rotate non-3x3 kernel by 45 degrees");
    }
  if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
    {
      if ( kernel->width == 1 || kernel->height == 1 )
        { /* Do a transpose of a 1 dimensional kernel,
          ** which results in a fast 90 degree rotation of some type.
          */
          ssize_t
            t;
          t = (ssize_t) kernel->width;
          kernel->width = kernel->height;
          kernel->height = (size_t) t;
          t = kernel->x;
          kernel->x = kernel->y;
          kernel->y = t;
          if ( kernel->width == 1 ) {
            angle = fmod(angle+270.0, 360.0);  /* angle reduced 90 degrees */
            kernel->angle = fmod(kernel->angle+90.0, 360.0);
          } else {
            angle = fmod(angle+90.0, 360.0);  /* angle increased 90 degrees */
            kernel->angle = fmod(kernel->angle+270.0, 360.0);
          }
        }
      else if ( kernel->width == kernel->height )
        { /* Rotate a square array of values by 90 degrees: 4-way cycle of
             cells mirrored around the two diagonals */
          { register ssize_t
              i,j,x,y;

            register MagickRealType
              *k,t;

            k=kernel->values;
            for( i=0, x=(ssize_t) kernel->width-1; i<=x; i++, x--)
              for( j=0, y=(ssize_t) kernel->height-1; j<y; j++, y--)
                { t = k[i+j*kernel->width];
                  k[i+j*kernel->width] = k[j+x*kernel->width];
                  k[j+x*kernel->width] = k[x+y*kernel->width];
                  k[x+y*kernel->width] = k[y+i*kernel->width];
                  k[y+i*kernel->width] = t;
                }
          }
          /* rotate the origin - relative to center of array */
          { register ssize_t x,y;
            x = (ssize_t) (kernel->x*2-kernel->width+1);
            y = (ssize_t) (kernel->y*2-kernel->height+1);
            kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
            kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
          }
          angle = fmod(angle+270.0, 360.0);  /* angle reduced 90 degrees */
          kernel->angle = fmod(kernel->angle+90.0, 360.0);
        }
      else
        perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
    }
  if ( 135.0 < angle && angle <= 225.0 )
    {
      /* For a 180 degree rotation - also known as a reflection.
       * This is actually a very very common operation!
       * Basically all that is needed is a reversal of the kernel data!
       * And a reflection of the origin
       */
      MagickRealType
        t;

      register MagickRealType
        *k;

      ssize_t
        i,
        j;

      k=kernel->values;
      j=(ssize_t) (kernel->width*kernel->height-1);
      for (i=0; i < j; i++, j--)
        t=k[i], k[i]=k[j], k[j]=t;

      kernel->x = (ssize_t) kernel->width - kernel->x - 1;
      kernel->y = (ssize_t) kernel->height - kernel->y - 1;
      angle = fmod(angle-180.0, 360.0);  /* angle+180 degrees */
      kernel->angle = fmod(kernel->angle+180.0, 360.0);
    }
  /* At this point angle should be at least between -45 (315) and +45 degrees.
   * In the future some form of non-orthogonal angled rotates could be
   * performed here, possibly with a linear kernel restriction.
   */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e G e o m e t r y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleGeometryKernelInfo() takes a geometry argument string, typically
% provided as a "-set option:convolve:scale {geometry}" user setting,
% and modifies the kernel according to the parsed arguments of that setting.
%
% The first argument (and any normalization flags) are passed to
% ScaleKernelInfo() to scale/normalize the kernel. The second argument
% is then passed to UnityAddKernelInfo() to add a scaled unity kernel
% into the scaled/normalized kernel.
%
% The format of the ScaleGeometryKernelInfo method is:
%
% void ScaleGeometryKernelInfo(KernelInfo *kernel,
% const char *geometry)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% o geometry:
% The geometry string to parse, typically from the user provided
% "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
  const char *geometry)
{
  /*
    Parse a "convolve:scale" style geometry string and apply it to the
    kernel: the first value (rho) and the normalization flags go to
    ScaleKernelInfo(); the second value (sigma), when present, blends in a
    scaled unity kernel via UnityAddKernelInfo().
  */
  GeometryInfo
    geometry_info;

  MagickStatusType
    status;

  SetGeometryInfo(&geometry_info);
  status = ParseGeometry(geometry, &geometry_info);
  /* A trailing '%' scales both arguments as percentages */
  if ( (status & PercentValue) != 0 )
    {
      geometry_info.rho *= 0.01;
      geometry_info.sigma *= 0.01;
    }
  /* Defaults for arguments the user did not supply */
  if ( (status & RhoValue) == 0 )
    geometry_info.rho = 1.0;
  if ( (status & SigmaValue) == 0 )
    geometry_info.sigma = 0.0;
  /* Scale/normalize the input kernel */
  ScaleKernelInfo(kernel, geometry_info.rho, (GeometryFlags) status);
  /* Add a unity kernel, for blending with the original image */
  if ( (status & SigmaValue) != 0 )
    UnityAddKernelInfo(kernel, geometry_info.sigma);
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleKernelInfo() scales the given kernel list by the given amount, with or
% without normalization of the sum of the kernel values (as per given flags).
%
% By default (no flags given) the values within the kernel is scaled
% directly using given scaling factor without change.
%
% If either of the two 'normalize_flags' are given the kernel will first be
% normalized and then further scaled by the scaling factor value given.
%
% Kernel normalization ('normalize_flags' given) is designed to ensure that
% any use of the kernel scaling factor with 'Convolve' or 'Correlate'
% morphology methods will fall into -1.0 to +1.0 range. Note that for
% non-HDRI versions of IM this may cause images to have any negative results
% clipped, unless some 'bias' is used.
%
% More specifically. Kernels which only contain positive values (such as a
% 'Gaussian' kernel) will be scaled so that those values sum to +1.0,
% ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
% For Kernels that contain some negative values, (such as 'Sharpen' kernels)
% the kernel will be scaled by the absolute of the sum of kernel values, so
% that it will generally fall within the +/- 1.0 range.
%
% For kernels whose values sum to zero, (such as 'Laplacian' kernels) kernel
% will be scaled by just the sum of the positive values, so that its output
% range will again fall into the +/- 1.0 range.
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/background
% matching) a special normalization method is provided to scale the positive
% values separately to those of the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
% attributes within the kernel structure have been correctly set during the
% kernels creation.
%
% NOTE: The values used for 'normalize_flags' have been selected specifically
% to match the use of geometry options, so that '!' means NormalizeValue, '^'
% means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
%
% The format of the ScaleKernelInfo method is:
%
% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
% const MagickStatusType normalize_flags )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scaling_factor:
% multiply all values (after normalization) by this factor if not
% zero. If the kernel is normalized regardless of any flags.
%
% o normalize_flags:
% GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue,
% and/or PercentValue
%
*/
/*
  Scale (and optionally normalize) every kernel in the given multi-kernel
  list in place.

    kernel          - head of the kernel list to scale
    scaling_factor  - factor applied after any normalization; a negative
                      factor negates the kernel, so the range/extrema
                      bookkeeping below is swapped to match
    normalize_flags - NormalizeValue: scale so values sum to +/-1
                      CorrelateNormalizeValue: scale positive and negative
                      values separately, forcing a zero-summing kernel

  Relies on kernel->positive_range / negative_range having been set
  correctly when the kernel was created.
*/
MagickExport void ScaleKernelInfo(KernelInfo *kernel,
  const double scaling_factor,const GeometryFlags normalize_flags)
{
  register double
    pos_scale,
    neg_scale;

  register ssize_t
    i;

  /* do the other kernels in a multi-kernel list first */
  if ( kernel->next != (KernelInfo *) NULL)
    ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags);

  /* Normalization of Kernel */
  pos_scale = 1.0;
  if ( (normalize_flags&NormalizeValue) != 0 ) {
    if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon )
      /* non-zero-summing kernel (generally positive) */
      pos_scale = fabs(kernel->positive_range + kernel->negative_range);
    else
      /* zero-summing kernel: normalize by the positive half only */
      pos_scale = kernel->positive_range;
  }
  /* Force kernel into a normalized zero-summing kernel */
  if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) {
    pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon )
      ? kernel->positive_range : 1.0;
    neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon )
      ? -kernel->negative_range : 1.0;
  }
  else
    neg_scale = pos_scale;

  /* finalize scaling_factor for positive and negative components */
  pos_scale = scaling_factor/pos_scale;
  neg_scale = scaling_factor/neg_scale;

  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    if (!IsNaN(kernel->values[i]))
      kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale;

  /* convolution output range */
  kernel->positive_range *= pos_scale;
  kernel->negative_range *= neg_scale;
  /* maximum and minimum values in kernel */
  kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale;
  kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale;

  /* swap kernel settings if user's scaling factor is negative */
  if ( scaling_factor < MagickEpsilon ) {
    double t;
    t = kernel->positive_range;
    kernel->positive_range = kernel->negative_range;
    kernel->negative_range = t;
    t = kernel->maximum;
    kernel->maximum = kernel->minimum;
    /* BUGFIX: was 'kernel->minimum = 1;', discarding the saved old maximum
       and corrupting the kernel's value-range metadata */
    kernel->minimum = t;
  }

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h o w K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShowKernelInfo() outputs the details of the given kernel definition to
% standard error, generally due to a users 'morphology:showKernel' option
% request.
%
% The format of the ShowKernel method is:
%
% void ShowKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickPrivate void ShowKernelInfo(const KernelInfo *kernel)
{
  /*
    Print a human-readable description of each kernel in the multi-kernel
    list to stderr: type, angle, geometry, value range, output range, and
    the full matrix of values (NaN cells shown as "nan").
  */
  const KernelInfo
    *curr;

  size_t
    kernel_index;

  kernel_index=0;
  for (curr=kernel; curr != (KernelInfo *) NULL; curr=curr->next)
  {
    size_t
      cell,
      column,
      row;

    /* header: number kernels only when this is a multi-kernel list */
    (void) FormatLocaleFile(stderr, "Kernel");
    if ( kernel->next != (KernelInfo *) NULL )
      (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) kernel_index );
    (void) FormatLocaleFile(stderr, " \"%s",
      CommandOptionToMnemonic(MagickKernelOptions, curr->type) );
    if ( fabs(curr->angle) >= MagickEpsilon )
      (void) FormatLocaleFile(stderr, "@%lg", curr->angle);
    (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long)
      curr->width,(unsigned long) curr->height,(long) curr->x,(long) curr->y);
    (void) FormatLocaleFile(stderr,
      " with values from %.*lg to %.*lg\n",
      GetMagickPrecision(), curr->minimum,
      GetMagickPrecision(), curr->maximum);
    (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg",
      GetMagickPrecision(), curr->negative_range,
      GetMagickPrecision(), curr->positive_range);
    /* classify the kernel by the sum of its values */
    if ( fabs(curr->positive_range+curr->negative_range) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Zero-Summing)\n");
    else if ( fabs(curr->positive_range+curr->negative_range-1.0) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Normalized)\n");
    else
      (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
        GetMagickPrecision(), curr->positive_range+curr->negative_range);
    /* dump the kernel values, one row per line */
    cell=0;
    for (row=0; row < curr->height; row++)
    {
      (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) row );
      for (column=0; column < curr->width; column++, cell++)
      {
        if (IsNaN(curr->values[cell]))
          (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
        else
          (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
            GetMagickPrecision(), (double) curr->values[cell]);
      }
      (void) FormatLocaleFile(stderr,"\n");
    }
    kernel_index++;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n i t y A d d K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel
% to the given pre-scaled and normalized Kernel. This in effect adds that
% amount of the original image into the resulting convolution kernel. This
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
% The resulting effect is to convert the defined kernels into blended
% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAddKernelInfo method is:
%
% void UnityAddKernelInfo(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scale:
% scaling factor for the unity kernel to be added to
% the given kernel.
%
*/
/* Blend a scaled 'Unity' (identity) kernel into every kernel of the given
   multi-kernel list, then refresh each kernel's cached meta-data.  In effect
   this adds the given fraction of the original image into the convolution
   result, producing blended soft-blur, unsharp or sharpening kernels. */
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
const double scale)
{
KernelInfo
*k;
/* Iterate over the whole kernel list; each node is updated independently. */
for (k=kernel; k != (KernelInfo *) NULL; k=k->next) {
/* The 'Unity' kernel is a single 1 at the kernel origin (x,y). */
k->values[k->x+k->y*k->width] += scale;
CalcKernelMetaData(k); /* recalculate the meta-data */
}
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z e r o K e r n e l N a n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroKernelNans() replaces any special 'nan' value that may be present in
% the kernel with a zero value. This is typically done when the kernel will
% be used in special hardware (GPU) convolution processors, to simplify
% matters.
%
% The format of the ZeroKernelNans method is:
%
% void ZeroKernelNans (KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
/* Replace every special 'nan' entry in each kernel of the kernel list with
   zero, e.g. so the kernel can be handed to GPU convolution code that does
   not understand NaN markers. */
MagickPrivate void ZeroKernelNans(KernelInfo *kernel)
{
KernelInfo
*k;
size_t
i;
/* Iterate over the multi-kernel list instead of recursing tail-first;
   each kernel is scrubbed independently, so the order is irrelevant. */
for (k=kernel; k != (KernelInfo *) NULL; k=k->next)
for (i=0; i < (k->width*k->height); i++)
if (IsNaN(k->values[i]))
k->values[i]=0.0;
return;
}
|
GB_binop__isne_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__isne_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__isne_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_uint16)
// A*D function (colscale): GB (_AxD__isne_uint16)
// D*A function (rowscale): GB (_DxB__isne_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_uint16)
// C=scalar+B GB (_bind1st__isne_uint16)
// C=scalar+B' GB (_bind1st_tran__isne_uint16)
// C=A+scalar GB (_bind2nd__isne_uint16)
// C=A'+scalar GB (_bind2nd_tran__isne_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_UINT16 || GxB_NO_ISNE_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
/* C = A+B (ISNE applied elementwise) where C, A and B are all dense and no
   accumulator is used.  Auto-generated: the body is a shared template driven
   by the GB_* macros defined at the top of this file. */
void GB (_Cdense_ewise3_noaccum__isne_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
/* C += B: accumulate a sparse matrix B into a dense matrix C using the ISNE
   operator.  Returns GrB_NO_VALUE when this kernel is compiled out. */
GrB_Info GB (_Cdense_accumB__isne_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
/* shared template does the work, parameterized by the GB_* macros */
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
/* C += b: accumulate a scalar b into a dense matrix C using the ISNE
   operator.  Returns GrB_NO_VALUE when this kernel is compiled out. */
GrB_Info GB (_Cdense_accumb__isne_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
/* unreachable: generated boilerplate, kept in sync with the Generator */
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
/* Column scale C = A*D, where D is diagonal: each entry of column j of A is
   combined with D(j,j) via the ISNE operator, writing into Cx. */
GrB_Info GB (_AxD__isne_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
/* Row scale C = D*B, where D is diagonal: each entry of row i of B is
   combined with D(i,i) via the ISNE operator, writing into Cx. */
GrB_Info GB (_DxB__isne_uint16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
/* eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B with the ISNE operator.  Also
   handles eWiseUnion, where the alpha/beta scalars substitute for entries
   missing from A or B respectively. */
GrB_Info GB (_AaddB__isne_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint16_t alpha_scalar ;
uint16_t beta_scalar ;
/* alpha/beta are only read for eWiseUnion; plain eWiseAdd ignores them */
if (is_eWiseUnion)
{
alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
/* eWiseMult (method 08): C=A.*B, C<M>=A.*B, or C<!M>=A.*B with the ISNE
   operator, where C is sparse or hypersparse. */
GrB_Info GB (_AemultB_08__isne_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
/* eWiseMult (method 02): C<#> = A.*B with the ISNE operator, where A is
   sparse/hypersparse and B is bitmap/full.  GB_BINOP_FLIP is 0 for ISNE
   (commutative), so only the unflipped template branch is compiled. */
GrB_Info GB (_AemultB_02__isne_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
/* eWiseMult (method 04): C<M> = A.*B with the ISNE operator, where M is
   sparse/hypersparse and both A and B are bitmap/full. */
GrB_Info GB (_AemultB_04__isne_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
/* eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B with the ISNE operator,
   where the result C is held in bitmap form. */
GrB_Info GB (_AemultB_bitmap__isne_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
/* bind1st: Cx [p] = (x != Bx [p]) for every entry p of B, with the scalar x
   bound as the first operand.  Cx and Bx may alias. */
GrB_Info GB (_bind1st__isne_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
/* skip positions absent from the bitmap (GBB is always 1 for full B) */
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
/* bind2nd: Cx [p] = (Ax [p] != y) for every entry p of A, with the scalar y
   bound as the second operand.  Cx and Ax may alias. */
GrB_Info GB (_bind2nd__isne_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
/* skip positions absent from the bitmap (GBB is always 1 for full A) */
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
/* bind1st + transpose: C = op (x, A'), applying cij = (x != aij) while
   transposing A, via the GB_CAST_OP macro defined just above. */
GrB_Info GB (_bind1st_tran__isne_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
/* restore GB_ATYPE for any code following this function */
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
/* bind2nd + transpose: C = op (A', y), applying cij = (aij != y) while
   transposing A, via the GB_CAST_OP macro defined just above. */
GrB_Info GB (_bind2nd_tran__isne_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
zboxloop.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "_hypre_utilities.h"
#include "HYPRE_struct_ls.h"
#include "HYPRE_krylov.h"
#include "_hypre_struct_mv.h"
/*--------------------------------------------------------------------------
* Test driver to time new boxloops and compare to the old ones
*--------------------------------------------------------------------------*/
/* Test driver that times the new "zypre_" boxloop macros and compares them
 * against the old "hypre_" boxloops over an (nx x ny x nz) box.
 * Options:
 *   -n <nx> <ny> <nz> : problem size per block (default 10x10x10)
 *   -P <Px> <Py> <Pz> : processor topology (default num_procs x 1 x 1)
 *   -d <dim>          : problem dimension (default 3)
 * Exits with status 1 on usage/topology errors or if the new-boxloop
 * correctness check fails; returns 0 otherwise. */
hypre_int
main( hypre_int argc,
char *argv[] )
{
HYPRE_Int arg_index;
HYPRE_Int print_usage;
HYPRE_Int nx, ny, nz;
HYPRE_Int P, Q, R;
HYPRE_Int time_index;
HYPRE_Int num_procs, myid;
HYPRE_Int dim;
HYPRE_Int rep, reps, fail, sum;
HYPRE_Int size;
hypre_Box *x1_data_box, *x2_data_box, *x3_data_box, *x4_data_box;
HYPRE_Int xi1, xi2, xi3, xi4;
HYPRE_Real *xp1, *xp2, *xp3, *xp4;
hypre_Index loop_size, start, unit_stride, index;
/*-----------------------------------------------------------
* Initialize some stuff
*-----------------------------------------------------------*/
/* Initialize MPI */
hypre_MPI_Init(&argc, &argv);
hypre_MPI_Comm_size(hypre_MPI_COMM_WORLD, &num_procs );
hypre_MPI_Comm_rank(hypre_MPI_COMM_WORLD, &myid );
/*-----------------------------------------------------------
* Set defaults
*-----------------------------------------------------------*/
dim = 3;
nx = 10;
ny = 10;
nz = 10;
P = num_procs;
Q = 1;
R = 1;
/*-----------------------------------------------------------
* Parse command line
*-----------------------------------------------------------*/
print_usage = 0;
arg_index = 1;
while (arg_index < argc)
{
if ( strcmp(argv[arg_index], "-n") == 0 )
{
arg_index++;
nx = atoi(argv[arg_index++]);
ny = atoi(argv[arg_index++]);
nz = atoi(argv[arg_index++]);
}
else if ( strcmp(argv[arg_index], "-P") == 0 )
{
arg_index++;
P = atoi(argv[arg_index++]);
Q = atoi(argv[arg_index++]);
R = atoi(argv[arg_index++]);
}
else if ( strcmp(argv[arg_index], "-d") == 0 )
{
arg_index++;
dim = atoi(argv[arg_index++]);
}
else if ( strcmp(argv[arg_index], "-help") == 0 )
{
print_usage = 1;
break;
}
else
{
/* unrecognized arguments are silently skipped */
arg_index++;
}
}
/*-----------------------------------------------------------
* Print usage info
*-----------------------------------------------------------*/
if ( (print_usage) && (myid == 0) )
{
hypre_printf("\n");
hypre_printf("Usage: %s [<options>]\n", argv[0]);
hypre_printf("\n");
hypre_printf(" -n <nx> <ny> <nz> : problem size per block\n");
hypre_printf(" -P <Px> <Py> <Pz> : processor topology\n");
hypre_printf(" -d <dim> : problem dimension (2 or 3)\n");
hypre_printf("\n");
}
if ( print_usage )
{
exit(1);
}
/*-----------------------------------------------------------
* Check a few things
*-----------------------------------------------------------*/
if ((P*Q*R) > num_procs)
{
if (myid == 0)
{
hypre_printf("Error: PxQxR is more than the number of processors\n");
}
exit(1);
}
else if ((P*Q*R) < num_procs)
{
if (myid == 0)
{
hypre_printf("Warning: PxQxR is less than the number of processors\n");
}
}
/*-----------------------------------------------------------
* Initialize some stuff
*-----------------------------------------------------------*/
/* Loop over the interior [1,n] of a box with one layer of ghost cells. */
hypre_SetIndex3(start, 1, 1, 1);
hypre_SetIndex3(loop_size, nx, ny, nz);
hypre_SetIndex3(unit_stride, 1, 1, 1);
x1_data_box = hypre_BoxCreate(dim);
x2_data_box = hypre_BoxCreate(dim);
x3_data_box = hypre_BoxCreate(dim);
x4_data_box = hypre_BoxCreate(dim);
hypre_SetIndex3(hypre_BoxIMin(x1_data_box), 0, 0, 0);
hypre_SetIndex3(hypre_BoxIMax(x1_data_box), nx+1, ny+1, nz+1);
hypre_CopyBox(x1_data_box, x2_data_box);
hypre_CopyBox(x1_data_box, x3_data_box);
hypre_CopyBox(x1_data_box, x4_data_box);
size = (nx+2)*(ny+2)*(nz+2);
xp1 = hypre_CTAlloc(HYPRE_Real, size);
xp2 = hypre_CTAlloc(HYPRE_Real, size);
xp3 = hypre_CTAlloc(HYPRE_Real, size);
xp4 = hypre_CTAlloc(HYPRE_Real, size);
/* Scale the repetition count so total work stays roughly constant. */
reps = 1000000000/(nx*ny*nz+1000);
/*-----------------------------------------------------------
* Print driver parameters
*-----------------------------------------------------------*/
if (myid == 0)
{
hypre_printf("Running with these driver parameters:\n");
hypre_printf(" (nx, ny, nz) = (%d, %d, %d)\n", nx, ny, nz);
hypre_printf(" (Px, Py, Pz) = (%d, %d, %d)\n", P, Q, R);
hypre_printf(" dim = %d\n", dim);
hypre_printf(" reps = %d\n", reps);
}
/*-----------------------------------------------------------
* Check new boxloops
*-----------------------------------------------------------*/
/* xp1 is already initialized to 0 */
zypre_BoxLoop1Begin(dim, loop_size,
x1_data_box, start, unit_stride, xi1);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE,xi1) HYPRE_SMP_SCHEDULE
#endif
zypre_BoxLoop1For(xi1)
{
xp1[xi1] ++;
}
zypre_BoxLoop1End(xi1);
/* Use old boxloop to check that values are set to 1 */
/* NOTE(review): the old boxloops below hard-code ndim = 3 while the new
* ones use 'dim' -- presumably the old macros are 3-D only; confirm the
* check is still valid when dim == 2. */
fail = 0;
sum = 0;
hypre_BoxLoop1Begin(3, loop_size,
x1_data_box, start, unit_stride, xi1);
hypre_BoxLoop1For(xi1)
{
sum += xp1[xi1];
if (xp1[xi1] != 1)
{
hypre_BoxLoopGetIndex(index);
hypre_printf("*(%d,%d,%d) = %d\n",
index[0], index[1], index[2], (HYPRE_Int) xp1[xi1]);
fail = 1;
}
}
hypre_BoxLoop1End(xi1);
if (sum != (nx*ny*nz))
{
hypre_printf("*sum = %d\n", sum);
fail = 1;
}
if (fail)
{
exit(1);
}
/*-----------------------------------------------------------
* Synchronize so that timings make sense
*-----------------------------------------------------------*/
hypre_MPI_Barrier(hypre_MPI_COMM_WORLD);
/*-----------------------------------------------------------
* Time old boxloops
*-----------------------------------------------------------*/
/* Time BoxLoop0 */
time_index = hypre_InitializeTiming("BoxLoop0");
hypre_BeginTiming(time_index);
for (rep = 0; rep < reps; rep++)
{
xi1 = 0;
hypre_BoxLoop0Begin(3, loop_size);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE) firstprivate(xi1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop0For()
{
xp1[xi1] += xp1[xi1];
xi1++;
}
hypre_BoxLoop0End();
}
hypre_EndTiming(time_index);
/* Time BoxLoop1 */
time_index = hypre_InitializeTiming("BoxLoop1");
hypre_BeginTiming(time_index);
for (rep = 0; rep < reps; rep++)
{
hypre_BoxLoop1Begin(3, loop_size,
x1_data_box, start, unit_stride, xi1);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop1For(xi1)
{
xp1[xi1] += xp1[xi1];
}
hypre_BoxLoop1End(xi1);
}
hypre_EndTiming(time_index);
/* Time BoxLoop2 */
time_index = hypre_InitializeTiming("BoxLoop2");
hypre_BeginTiming(time_index);
for (rep = 0; rep < reps; rep++)
{
hypre_BoxLoop2Begin(3, loop_size,
x1_data_box, start, unit_stride, xi1,
x2_data_box, start, unit_stride, xi2);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi1,xi2) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(xi1, xi2)
{
xp1[xi1] += xp1[xi1] + xp2[xi2];
}
hypre_BoxLoop2End(xi1, xi2);
}
hypre_EndTiming(time_index);
/* Time BoxLoop3 */
time_index = hypre_InitializeTiming("BoxLoop3");
hypre_BeginTiming(time_index);
for (rep = 0; rep < reps; rep++)
{
hypre_BoxLoop3Begin(3, loop_size,
x1_data_box, start, unit_stride, xi1,
x2_data_box, start, unit_stride, xi2,
x3_data_box, start, unit_stride, xi3);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi1,xi2,xi3) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(xi1, xi2, xi3)
{
xp1[xi1] += xp1[xi1] + xp2[xi2] + xp3[xi3];
}
hypre_BoxLoop3End(xi1, xi2, xi3);
}
hypre_EndTiming(time_index);
/* Time BoxLoop4 */
time_index = hypre_InitializeTiming("BoxLoop4");
hypre_BeginTiming(time_index);
for (rep = 0; rep < reps; rep++)
{
hypre_BoxLoop4Begin(3, loop_size,
x1_data_box, start, unit_stride, xi1,
x2_data_box, start, unit_stride, xi2,
x3_data_box, start, unit_stride, xi3,
x4_data_box, start, unit_stride, xi4);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi1,xi2,xi3,xi4) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(xi1, xi2, xi3, xi4)
{
xp1[xi1] += xp1[xi1] + xp2[xi2] + xp3[xi3] + xp4[xi4];
}
hypre_BoxLoop4End(xi1, xi2, xi3, xi4);
}
hypre_EndTiming(time_index);
/* NOTE(review): only the last time_index is finalized here; earlier timing
* indices are cleared by hypre_ClearTiming() below -- confirm intended. */
hypre_PrintTiming("Old BoxLoop times", hypre_MPI_COMM_WORLD);
hypre_FinalizeTiming(time_index);
hypre_ClearTiming();
/*-----------------------------------------------------------
* Time new boxloops
*-----------------------------------------------------------*/
/* Time BoxLoop0 */
time_index = hypre_InitializeTiming("BoxLoop0");
hypre_BeginTiming(time_index);
for (rep = 0; rep < reps; rep++)
{
xi1 = 0;
zypre_BoxLoop0Begin(dim, loop_size);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE) firstprivate(xi1) HYPRE_SMP_SCHEDULE
#endif
zypre_BoxLoop0For()
{
xp1[xi1] += xp1[xi1];
xi1++;
}
zypre_BoxLoop0End();
}
hypre_EndTiming(time_index);
/* Time BoxLoop1 */
time_index = hypre_InitializeTiming("BoxLoop1");
hypre_BeginTiming(time_index);
for (rep = 0; rep < reps; rep++)
{
zypre_BoxLoop1Begin(dim, loop_size,
x1_data_box, start, unit_stride, xi1);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE,xi1) HYPRE_SMP_SCHEDULE
#endif
zypre_BoxLoop1For(xi1)
{
xp1[xi1] += xp1[xi1];
}
zypre_BoxLoop1End(xi1);
}
hypre_EndTiming(time_index);
/* Time BoxLoop2 */
time_index = hypre_InitializeTiming("BoxLoop2");
hypre_BeginTiming(time_index);
for (rep = 0; rep < reps; rep++)
{
zypre_BoxLoop2Begin(dim, loop_size,
x1_data_box, start, unit_stride, xi1,
x2_data_box, start, unit_stride, xi2);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE,xi1,xi2) HYPRE_SMP_SCHEDULE
#endif
zypre_BoxLoop2For(xi1, xi2)
{
xp1[xi1] += xp1[xi1] + xp2[xi2];
}
zypre_BoxLoop2End(xi1, xi2);
}
hypre_EndTiming(time_index);
/* Time BoxLoop3 */
time_index = hypre_InitializeTiming("BoxLoop3");
hypre_BeginTiming(time_index);
for (rep = 0; rep < reps; rep++)
{
zypre_BoxLoop3Begin(dim, loop_size,
x1_data_box, start, unit_stride, xi1,
x2_data_box, start, unit_stride, xi2,
x3_data_box, start, unit_stride, xi3);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE,xi1,xi2,xi3) HYPRE_SMP_SCHEDULE
#endif
zypre_BoxLoop3For(xi1, xi2, xi3)
{
xp1[xi1] += xp1[xi1] + xp2[xi2] + xp3[xi3];
}
zypre_BoxLoop3End(xi1, xi2, xi3);
}
hypre_EndTiming(time_index);
/* Time BoxLoop4 */
time_index = hypre_InitializeTiming("BoxLoop4");
hypre_BeginTiming(time_index);
for (rep = 0; rep < reps; rep++)
{
zypre_BoxLoop4Begin(dim, loop_size,
x1_data_box, start, unit_stride, xi1,
x2_data_box, start, unit_stride, xi2,
x3_data_box, start, unit_stride, xi3,
x4_data_box, start, unit_stride, xi4);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE,xi1,xi2,xi3,xi4) HYPRE_SMP_SCHEDULE
#endif
zypre_BoxLoop4For(xi1, xi2, xi3, xi4)
{
xp1[xi1] += xp1[xi1] + xp2[xi2] + xp3[xi3] + xp4[xi4];
}
zypre_BoxLoop4End(xi1, xi2, xi3, xi4);
}
hypre_EndTiming(time_index);
hypre_PrintTiming("New BoxLoop times", hypre_MPI_COMM_WORLD);
hypre_FinalizeTiming(time_index);
hypre_ClearTiming();
/*-----------------------------------------------------------
* Finalize things
*-----------------------------------------------------------*/
hypre_BoxDestroy(x1_data_box);
hypre_BoxDestroy(x2_data_box);
hypre_BoxDestroy(x3_data_box);
hypre_BoxDestroy(x4_data_box);
hypre_TFree(xp1);
hypre_TFree(xp2);
hypre_TFree(xp3);
hypre_TFree(xp4);
/* Finalize MPI */
hypre_MPI_Finalize();
return (0);
}
|
measure.c | #include <string.h>
#include <mpi.h>
#include "ScaleME.h"
#include "precision.h"
#include "measure.h"
#include "fsgreen.h"
#include "integrate.h"
#include "mlfma.h"
#include "util.h"
/* Evaluate a directivity pattern cos(theta) exp(-a sin(theta)**2), in which
 * theta is the angle between a focal axis d and r = (obs - src), where obs and
 * src are the observer and source positions, respectively. */
/* Evaluate a directivity pattern cos(theta) * exp(-a * sin(theta)^2), where
 * theta is the angle between the focal axis d and r = obs - src.
 *
 * Fix: compute intermediates in the project-wide 'real' type rather than
 * hard-coded float, so no precision is lost when 'real' is configured as
 * double in precision.h.  The float return type is kept to match the
 * declaration in measure.h. */
float directivity(real *obs, real *src, real *d, real a) {
real r[3], dn, rn, ctheta, sthsq;
/* Displacement from source to observer. */
r[0] = obs[0] - src[0];
r[1] = obs[1] - src[1];
r[2] = obs[2] - src[2];
/* Norms of the displacement and of the focal axis. */
rn = sqrt(r[0] * r[0] + r[1] * r[1] + r[2] * r[2]);
dn = sqrt(d[0] * d[0] + d[1] * d[1] + d[2] * d[2]);
/* cos(theta) as the normalized dot product. */
ctheta = (r[0] * d[0] + r[1] * d[1] + r[2] * d[2]) / (rn * dn);
/* sin(theta)^2 = 1 - cos(theta)^2. */
sthsq = 1. - ctheta * ctheta;
return ctheta * exp(-a * sthsq);
}
/* Slow computation of incident field for a single source. */
/* Slow (direct integration) computation of the incident field for a single
 * source, filling 'rhs' with one complex value per basis function.  srcloc
 * gives the source location (or, when 'plane' is nonzero, the plane-wave
 * direction).  'dir', when non-NULL and the source is a point source, holds
 * a focal axis in dir[0..2] and a beam parameter in dir[3] used to weight
 * each sample by a directivity pattern.  Always returns 0. */
int buildrhs (cplx *rhs, real *srcloc, int plane, real *dir) {
real scale = 1.0, dc[3] = { fmaconf.cell, fmaconf.cell, fmaconf.cell };
ifunc rhsfunc = fsgreen;
/* Use a plane wave instead of a point source. */
if (plane) {
rhsfunc = fsplane;
scale = 1.0 / (4.0 * M_PI);
}
#pragma omp parallel default(shared)
{
cplx *rptr;
real ctr[3], off[3];
int i, j, idx[3];
/* Parallelize over basis-function groups; each group's slice of rhs is
 * written by exactly one thread, so no synchronization is needed. */
#pragma omp for
for (i = 0; i < fmaconf.numbases; ++i) {
rptr = rhs + i * fmaconf.bspboxvol;
/* Find the center of the group. */
bscenter (fmaconf.bslist[i], off);
/* The offset of the first basis function in the group. */
off[0] += 0.5 * fmaconf.cell - 0.5 * fmaconf.grplen;
off[1] += 0.5 * fmaconf.cell - 0.5 * fmaconf.grplen;
off[2] += 0.5 * fmaconf.cell - 0.5 * fmaconf.grplen;
for (j = 0; j < fmaconf.bspboxvol; ++j, ++rptr) {
/* The position in the local grid of the basis function. */
GRID(idx, j, fmaconf.bspbox, fmaconf.bspbox);
/* The center of the basis function. */
ctr[0] = off[0] + fmaconf.cell * (real)idx[0];
ctr[1] = off[1] + fmaconf.cell * (real)idx[1];
ctr[2] = off[2] + fmaconf.cell * (real)idx[2];
*rptr = scale * srcint (fmaconf.k0, ctr, srcloc, dc, rhsfunc);
/* Include a directivity, if appropriate. */
if (dir && !plane)
*rptr *= directivity(ctr, srcloc, dir, dir[3]);
}
}
}
return 0;
}
/* Fast FMM computation of the far-field pattern using interpolation. */
/* Fast FMM computation of the far-field pattern at the observation
 * directions described by 'obs', scaling the raw ScaleME far field by
 * k0 / (4 pi).  Returns 0 in all cases. */
int farfield (cplx *currents, measdesc *obs, cplx *result) {
int i;
cplx fact;
/* The result must be a pointer to the pointer. */
/* NOTE(review): when ScaleME_evlRootFarFld fails this returns 0 (the same
 * value as success) and skips the scaling below -- confirm callers do not
 * rely on an error indication here. */
if (ScaleME_evlRootFarFld (obs->imat[0], currents, &result)) return 0;
/* The far-field pattern already has a factor of k in the front.
 * However, the actual integral needs (k^2 / 4 pi), so we need the
 * extra factors in the field. */
fact = fmaconf.k0 / (4 * M_PI);
for (i = 0; i < obs->count; ++i) result[i] *= fact;
return 0;
}
/* Split a string str, separated by delimitors dlm, into an array of strings
* with maximum length len. */
/* Split a string str, separated by delimitors dlm, into an array of strings
 * with maximum length len. Tokens are produced in place with strsep, which
 * modifies str; if fewer than len tokens exist, the remaining slots are
 * filled with NULL. Always returns len, not the token count. */
static int splitstr (char **arr, char *str, char *dlm, int len) {
char **ap;
/* Parse the string. */
for (ap = arr; ap < arr + len; ++ap) *ap = strsep(&str, dlm);
return len;
}
/* Attempt to an angular range for source specification. The value maxdef
* specifies the default maximum value for the angle. */
/* Parse an angular range specification "count:min:max" from str. Missing
 * fields keep the defaults (*min = 0, *max = maxdef). Returns the sample
 * count (0 if unspecified). Note: str is modified in place by splitstr. */
static int measrange (char *str, real *min, real *max, real maxdef) {
char **ap, *rn[3];
int nr = 0;
/* Set default angular limits values first. */
*min = 0.0;
*max = maxdef;
/* Split the measurement range into an array. */
splitstr (rn, str, ":", 3);
/* Grab the number of angles in the range. */
if (rn[0] && strlen(rn[0])) nr = strtol (rn[0], NULL, 0);
/* Grab the minimum angle, if specified. */
if (rn[1] && strlen(rn[1])) *min = strtod (rn[1], NULL);
/* Grab the maximum angle, if specified. */
if (rn[2] && strlen(rn[2])) *max = strtod (rn[2], NULL);
/* ap is declared but unused here. */
return nr;
}
/* Build the location descriptor. */
/* Fill desc with ntheta x nphi unit-sphere sample directions covering the
 * angular ranges [tmin,tmax] x [pmin,pmax] (degrees, converted to radians).
 * The polar samples are offset by one step so the poles are never hit.
 * Returns the total sample count desc->count. */
static int buildlocs (measdesc *desc, int plane, int ntheta,
real tmin, real tmax, int nphi, real pmin, real pmax) {
int i, j, k;
real theta, dtheta, dphi, phi, rst;
/* Clear the interpolation matrix pointer. */
desc->imat[0] = desc->imat[1] = NULL;
/* Configure the locations to be plane-wave directions or point sources. */
desc->plane = plane;
/* Record the number of angular samples desired. */
desc->ntheta = ntheta;
desc->nphi = nphi;
/* Record the limits of the ranges. Convert to radians. */
desc->trange[0] = tmin * M_PI / 180.0;
desc->trange[1] = tmax * M_PI / 180.0;
desc->prange[0] = pmin * M_PI / 180.0;
desc->prange[1] = pmax * M_PI / 180.0;
/* Count the total number of measurements and allocate the location array. */
/* NOTE(review): the malloc result is not checked before use below. */
desc->count = desc->ntheta * desc->nphi;
desc->locations = malloc (3 * desc->count * sizeof(real));
/* Calculate the angular steps. The polar angle avoids the poles. */
dtheta = (desc->trange[1] - desc->trange[0]);
dtheta /= MAX (desc->ntheta + 1, 1);
dphi = (desc->prange[1] - desc->prange[0]);
dphi /= MAX (desc->nphi, 1);
/* Populate the location array. Skip the north pole! */
for (i = 0, k = 0; i < desc->ntheta; ++i) {
theta = desc->trange[0] + (i + 1) * dtheta;
for (j = 0; j < desc->nphi; ++j, ++k) {
phi = desc->prange[0] + j * dphi;
rst = sin (theta);
/* Cartesian unit vector for (theta, phi). */
desc->locations[3 * k] = rst * cos (phi);
desc->locations[3 * k + 1] = rst * sin (phi);
desc->locations[3 * k + 2] = cos (theta);
}
}
return desc->count;
}
/* Build the source descriptor with specification string spec. */
/* Build the source descriptor from spec. Three comma-separated numbers
 * ("x,y,z") select a single point source; otherwise the first two fields
 * are theta/phi ranges for a sweep of plane-wave directions. Returns the
 * number of sources. spec is modified in place by splitstr. */
int buildsrc (measdesc *desc, char *spec) {
real tmin = 0, tmax = 0, pmin = 0, pmax = 0, srcpt[3];
char *srcloc[3];
int ntheta = 0, nphi = 0, plane = 1;
/* Split the source locations into a theta and phi range, or coordinates. */
splitstr (srcloc, spec, ",", 3);
if (srcloc[2]) {
/* If a third argument is present, use a point source. */
plane = 0;
srcpt[0] = strtod (srcloc[0], NULL);
srcpt[1] = strtod (srcloc[1], NULL);
srcpt[2] = strtod (srcloc[2], NULL);
ntheta = nphi = 1;
} else {
/* Otherwise, use a range of plane waves. */
ntheta = measrange (srcloc[0], &tmin, &tmax, 180);
nphi = measrange (srcloc[1], &pmin, &pmax, 360);
}
/* Build the observer locations. */
buildlocs (desc, plane, ntheta, tmin, tmax, nphi, pmin, pmax);
/* Override the location for a point source. */
if (!plane) memcpy (desc->locations, srcpt, 3 * sizeof(real));
return desc->count;
}
/* Build the observation descriptor with specification string spec. */
/* Build the observation descriptor from spec ("trange,prange"); observers
 * are always plane-wave directions. Returns the observation count.
 * spec is modified in place by splitstr. */
int buildobs (measdesc *desc, char *spec) {
real tmin = 0, tmax = 0, pmin = 0, pmax = 0;
char *obsloc[2];
int ntheta = 0, nphi = 0;
/* Split the observer locations into a theta and phi range. */
splitstr (obsloc, spec, ",", 2);
/* Parse the theta and phi ranges. */
ntheta = measrange (obsloc[0], &tmin, &tmax, 180);
nphi = measrange (obsloc[1], &pmin, &pmax, 360);
/* Build the observer locations. The type is always a plane-wave. */
buildlocs (desc, 1, ntheta, tmin, tmax, nphi, pmin, pmax);
return desc->count;
}
/* Release the resources held by a measurement descriptor: the location
 * array and any root interpolation matrices that were built. */
void delmeas (measdesc *desc) {
free (desc->locations);
/* Clear the root interpolation matrix, if applicable. */
if (desc->imat[0]) ScaleME_delRootInterpMat (desc->imat);
if (desc->imat[1]) ScaleME_delRootInterpMat (desc->imat + 1);
}
|
declare_simd_aarch64_sve.c | // REQUIRES: aarch64-registered-target
// -fopemp and -fopenmp-simd behavior are expected to be the same
// RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +sve \
// RUN: -fopenmp -x c -emit-llvm %s -o - -femit-all-decls | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +sve \
// RUN: -fopenmp-simd -x c -emit-llvm %s -o - -femit-all-decls | FileCheck %s
#pragma omp declare simd
#pragma omp declare simd notinbranch
#pragma omp declare simd simdlen(2)
#pragma omp declare simd simdlen(4)
#pragma omp declare simd simdlen(5) // not a multiple of 128-bits
#pragma omp declare simd simdlen(6)
#pragma omp declare simd simdlen(8)
#pragma omp declare simd simdlen(32)
#pragma omp declare simd simdlen(34) // requires more than 2048 bits
double foo(float x);
// CHECK-DAG: "_ZGVsM2v_foo" "_ZGVsM32v_foo" "_ZGVsM4v_foo" "_ZGVsM6v_foo" "_ZGVsM8v_foo" "_ZGVsMxv_foo"
// CHECK-NOT: _ZGVsN
// CHECK-NOT: _ZGVsM5v_foo
// CHECK-NOT: _ZGVsM34v_foo
// CHECK-NOT: foo
/* Caller loop that forces the vectorizer to consider the declare-simd
 * variants of foo; only the mangled SVE names are checked by FileCheck. */
void foo_loop(double *x, float *y, int N) {
for (int i = 0; i < N; ++i) {
x[i] = foo(y[i]);
}
}
// test integers
#pragma omp declare simd notinbranch
char a01_fun(int x);
// CHECK-DAG: _ZGVsMxv_a01_fun
// CHECK-NOT: a01_fun
static int *in;
static char *out;
/* Exercise the integer-typed declare-simd function so its SVE variant
 * (_ZGVsMxv_a01_fun) is emitted and can be pattern-matched. */
void do_something() {
*out = a01_fun(*in);
}
|
closed_bug2.c | #include <stdio.h>
#include "assert.h"
#include <unistd.h>
#define NZ 10
#define NA 9
#pragma omp declare target
int colstat[NZ];
#pragma omp end declare target
/* Regression test: a declare-target global mapped with alloc (full range
 * and zero-length [:0]) must still refer to the device copy; the writes on
 * the device are then fetched with "target update from" and verified. */
int main(){
colstat[0]=-1;
#pragma omp target map(alloc:colstat[0:NZ])
{
colstat[1] = 1111;
}
#pragma omp target map(alloc:colstat[:0])
{
colstat[2] = 2222;
}
fprintf(stderr, "BEFORE colstat[0..2] %d %d %d \n", colstat[0], colstat[1], colstat[2]);
#pragma omp target update from(colstat)
fprintf(stderr, "AFTER colstat[0..2] %d %d %d \n", colstat[0], colstat[1], colstat[2]);
if (colstat[1] == 1111 && colstat[2] == 2222)
printf("Success\n");
else
printf("Fail!\n");
return (colstat[1] == 1111 && colstat[2] == 2222) ? 0 : 1 ;
}
|
vector.c | #include "vector.h"
/* No-op removal callback: installed as vector->rmv when the vector must
 * not free its items (e.g. during a soft transfer of ownership). */
void do_nothing(void *v){
}
/* Pairwise combination of two equally sized vectors: applies foo to each
 * pair (v1[i], v2[i]) and collects the results (pointers only, no copies)
 * in a freshly allocated vector. Returns NULL when the sizes differ. */
vector_t *vector_dot_prod(vector_t *v1, vector_t *v2, void *(*foo)(void *, void *)){
    if (v1->size != v2->size) {
        return NULL;
    }
    vector_t *result = vector_init(VECTOR_VARIABLE_SIZE, v1->size);
    int idx;
    for (idx = 0; idx < v1->size; idx++) {
        void *left = vector_get(v1, idx);
        void *right = vector_get(v2, idx);
        vector_soft_put(result, foo(left, right));
    }
    return result;
}
/* Cross product of two vectors: applies foo to every (v1[i], v2[j]) pair.
 * NULL results are dropped; non-NULL results are stored softly in a new
 * vector sized for the worst case (|v1| * |v2|). */
vector_t *vector_x_prod(vector_t *v1, vector_t *v2, void *(*foo)(void *, void *)){
    vector_t *result = vector_init(VECTOR_VARIABLE_SIZE, v1->size * v2->size);
    int a, b;
    for (a = 0; a < v1->size; a++) {
        for (b = 0; b < v2->size; b++) {
            void *combined = foo(vector_get(v1, a), vector_get(v2, b));
            if (combined != NULL) {
                vector_soft_put(result, combined);
            }
        }
    }
    return result;
}
/* Left fold over the vector: acc = foo(acc, item) for each item in order,
 * starting from a NULL accumulator. Returns the final accumulator
 * (NULL for an empty vector). */
void *vector_reduce(vector_t *v1, void *(*foo)(void * sum, void * val)){
    void *acc = NULL;
    int pos;
    for (pos = 0; pos < v1->size; pos++) {
        acc = foo(acc, vector_get(v1, pos));
    }
    return acc;
}
/* Invoke foo on every item of the vector, in index order, for its side
 * effects only. */
void vector_execute_for_all(vector_t *v, void (*foo)(void *)){
    int pos;
    for (pos = 0; pos < v->size; pos++) {
        void *item = vector_get(v, pos);
        foo(item);
    }
}
/* Map foo over the vector and collect the returned pointers (soft puts,
 * no copies) into a new vector of the same length. */
vector_t *vector_execute_for_all_and_save(vector_t *v, void *(*foo)(void *)){
    vector_t *mapped = vector_init(VECTOR_VARIABLE_SIZE, v->size);
    int pos;
    for (pos = 0; pos < v->size; pos++) {
        vector_soft_put(mapped, foo(vector_get(v, pos)));
    }
    return mapped;
}
/* Remove every item for which check() returns false. Removal uses the
 * lazy policy internally (O(1) per removal), then the storage is
 * compacted once and the caller's policy is restored.
 * FIX: the loop previously ran under "#pragma omp parallel for", but
 * vector_remove() increments vector->fragmental and invokes the rmv
 * callback with no synchronization — a data race. The loop is now
 * sequential, which is the only correct option without adding locks. */
void vector_filter(vector_t *vector, int (*check)(void *)){
    int i;
    int prev_policy = vector->REMOVE_POLICY;
    vector->REMOVE_POLICY = REMP_LAZY;
    for (i = 0; i < vector->size; i++) {
        if (!check(vector_get(vector, i))) {
            vector_remove(vector, i);
        }
    }
    vector_defragment(vector);
    vector->REMOVE_POLICY = prev_policy;
}
/* Return a new vector containing deep copies (vector_put) of every item
 * for which check() returns true; the result's capacity is trimmed to
 * its final size with vector_zip. */
vector_t *vector_select(vector_t *vector, int (*check)(void *)){
    vector_t *matches = vector_init(vector->item_sizeof, vector->size);
    int pos;
    for (pos = 0; pos < vector->size; pos++) {
        void *candidate = vector_get(vector, pos);
        if (check(candidate)) {
            vector_put(matches, candidate);
        }
    }
    vector_zip(matches);
    return matches;
}
/* Allocate and initialize an empty vector with room for initial_limit
 * item pointers. item_sizeof is the per-item byte size used by the
 * deep-copy operations (vector_put / vector_insert); free() is installed
 * as the default removal callback. */
vector_t *vector_init(size_t item_sizeof, size_t initial_limit){
    vector_t *vec = (vector_t *) getMem(sizeof(vector_t));
    vec->item_sizeof = item_sizeof;
    vec->items = (void **) getMem(initial_limit * sizeof(void *));
    vec->limit = initial_limit;
    vec->size = 0;
    vec->REMOVE_POLICY = 0;
    vec->fragmental = 0;
    vec->rmv = &free;
    return vec;
}
/* Reset the vector to empty by NULLing the item slots and zeroing size.
 * NOTE(review): the items themselves are NOT passed to rmv/freed here —
 * if the vector owns its items this leaks; confirm callers release them
 * first (use vector_clear to free while emptying). */
void vector_tabularasa(vector_t *vector){
int i;
for(i=0;i<vector->size;i++){
vector->items[i] = NULL;
}
vector->size = 0;
}
/* Append the pointer itself (no copy) to the end of the vector, growing
 * the backing array by ~1.5x when full. Ownership of item transfers to
 * the vector (it will be passed to rmv on removal). */
void vector_soft_put(vector_t *vector, void *item){
    if (vector->size == vector->limit) {
        size_t grown = vector->limit + (vector->limit >> 1) + 1;
        resizeMem((void **)&(vector->items), vector->limit * sizeof(void *), grown * sizeof(void *));
        vector->limit = grown;
    }
    vector->items[vector->size++] = item;
}
/* Append a deep copy of item (item_sizeof bytes) to the end of the
 * vector, growing the backing array by ~1.5x when full. Always returns 0. */
int vector_put(vector_t *vector, void *item){
    if (vector->size == vector->limit) {
        size_t grown = vector->limit + (vector->limit >> 1) + 1;
        resizeMem((void **)&(vector->items), vector->limit * sizeof(void *), grown * sizeof(void *));
        vector->limit = grown;
    }
    void *copy = getMem(vector->item_sizeof);
    memcpy(copy, item, vector->item_sizeof);
    vector->items[vector->size] = copy;
    vector->size++;
    return 0;
}
/* Move all item pointers from source into target (soft puts), then empty
 * source WITHOUT freeing the items: rmv is temporarily swapped for the
 * no-op callback around vector_clear so ownership transfers intact. */
void vector_soft_transfer(vector_t *target, vector_t *source){
    int pos;
    for (pos = 0; pos < source->size; pos++) {
        vector_soft_put(target, vector_get(source, pos));
    }
    void (*saved_rmv)(void *) = source->rmv;
    source->rmv = do_nothing;
    vector_clear(source);
    source->rmv = saved_rmv;
}
/* Set the removal strategy (REMP_SORTED, REMP_FAST, or REMP_LAZY) used by
 * subsequent vector_remove calls. */
void vector_update_remove_policy(vector_t *vector, int policy){
vector->REMOVE_POLICY = policy;
}
/* Linear search comparing item byte-for-byte (memcmp over item_sizeof)
 * against each stored item. Returns the first matching index, or -1.
 * Note: slots must be non-NULL; lazily removed (NULL) slots are not
 * tolerated here, matching the original behavior. */
int vector_contains(vector_t *vector, void *item){
    int pos;
    for (pos = 0; pos < vector->size; pos++) {
        if (0 == memcmp(vector->items[pos], item, vector->item_sizeof)) {
            return pos;
        }
    }
    return -1;
}
/* Linear search using a caller-supplied comparator (qsort-style: returns
 * 0 on equality). Returns the first matching index, or -1.
 * NOTE(review): the name "comptains" is a typo for "contains" but is kept
 * because external callers may link against it. */
int vector_comptains(vector_t *vector, void *item, int (*cmp)(const void*, const void*)){
int i;
for(i=0;i<vector->size;i++){
if(cmp(vector->items[i],item)==0){
return i;
}
}
return -1;
}
/* Compact a lazily fragmented vector: shift all non-NULL item pointers to
 * the front, preserving relative order, then shrink size to the live
 * count and reset the fragmentation counter.
 * Returns 0 if the vector was not fragmented, 1 after compaction.
 * FIX: the previous two-pointer loop tested items[j] (the write cursor)
 * instead of items[i] (the read cursor) and terminated one step early,
 * so trailing elements were dropped — e.g. {A, NULL, B} compacted to
 * size 1, losing B. This rewrite uses the standard stable-compaction
 * idiom and is correct for any hole pattern. */
int vector_defragment(vector_t *vector){
    if (vector->fragmental == 0) {
        return 0;
    }
    size_t read, write = 0;
    for (read = 0; read < vector->size; read++) {
        if (vector->items[read] != NULL) {
            if (read != write) {
                vector->items[write] = vector->items[read];
            }
            write++;
        }
    }
    vector->fragmental = 0;
    vector->size = write;
    return 1;
}
/* Insert a deep copy of item at position index, shifting subsequent items
 * one slot to the right. If the vector is lazily fragmented, the first
 * NULL hole at or after index absorbs the shift instead of growing the
 * vector.
 * FIX: size was previously incremented unconditionally; when a NULL hole
 * was reused the shift consumed no new slot, so the old size++ extended
 * the vector over a stale pointer at items[size-1]. Size now grows only
 * when no hole was reused. */
void vector_insert(vector_t *vector, void *item, size_t index){
    if (vector->limit == vector->size) {
        size_t new_limit = vector->limit + (vector->limit >> 1) + 1;
        resizeMem((void **)&(vector->items), vector->limit * sizeof(void *), sizeof(void *) * new_limit);
        vector->limit = new_limit;
    }
    size_t i;
    size_t target = vector->size;
    int reused_hole = 0;
    if (vector->fragmental > 0) {
        /* Look for a reusable NULL slot past the insertion point. */
        for (i = index; i < vector->size; i++) {
            if (vector->items[i] == NULL) {
                target = i;
                vector->fragmental--;
                reused_hole = 1;
                break;
            }
        }
    }
    /* Shift [index, target) one slot right, overwriting the hole (or the
     * first unused slot past the end). */
    for (i = target; i > index; i--) {
        vector->items[i] = vector->items[i - 1];
    }
    vector->items[index] = getMem(vector->item_sizeof);
    memcpy(vector->items[index], item, vector->item_sizeof);
    if (!reused_hole) {
        vector->size = vector->size + 1;
    }
}
/* Remove the item at index according to the current removal policy:
 *   REMP_SORTED - free the item, shift the tail left (order preserved);
 *   REMP_FAST   - free the item, move the last item into the gap;
 *   REMP_LAZY   - free the item, leave a NULL hole, bump fragmental.
 * Returns 0 on success, -1 for an out-of-range index or an already
 * removed (NULL) slot, -2 for an unknown policy.
 * FIX: the bounds check now runs BEFORE items[index] is read; the old
 * order (items[index] first) performed an out-of-bounds read whenever
 * index >= size. */
int vector_remove(vector_t *vector, size_t index){
    if (vector->size <= index || vector->items[index] == NULL) {
        return -1;
    }
    switch (vector->REMOVE_POLICY) {
    case REMP_SORTED:
        vector->rmv(vector->items[index]);
        vector->size--;
        {
            size_t i;
            for (i = index; i < vector->size; i++) {
                vector->items[i] = vector->items[i + 1];
            }
        }
        break;
    case REMP_FAST:
        vector->size--;
        vector->rmv(vector->items[index]);
        vector->items[index] = vector->items[vector->size];
        break;
    case REMP_LAZY:
        vector->rmv(vector->items[index]);
        vector->items[index] = NULL;
        vector->fragmental++;
        break;
    default:
        fprintf(stderr, "UNKNOWN POLICY %d\n", vector->REMOVE_POLICY);
        return -2;
    }
    return 0;
}
/* Empty the vector: pass every item to the rmv callback (free by
 * default), NULL the slots, and reset size. Capacity is untouched. */
void vector_clear( vector_t *vector){
    int pos;
    for (pos = 0; pos < vector->size; pos++) {
        vector->rmv(vector->items[pos]);
        vector->items[pos] = NULL;
    }
    vector->size = 0;
}
/* Shrink the backing array so capacity (limit) equals the current size,
 * releasing the slack. NOTE(review): original author flagged this as
 * not tested yet. */
void vector_zip( vector_t *vector){
resizeMem((void **)&(vector->items),vector->limit * sizeof(void *),vector->size * sizeof(void *));
vector->limit = vector->size;
}
/* Return the item pointer at index. Bounds are checked only in debug
 * builds (__DEBUG__); release builds perform an unchecked access. */
void *vector_get( vector_t *vector, size_t index){
#ifdef __DEBUG__
if(vector->size <= index){
fprintf(stderr,"Access out of index\n");
return NULL;
}
#endif
return vector->items[index];
}
/* Return the last item. NOTE(review): undefined behavior on an empty
 * vector (reads items[-1]) — callers must ensure size > 0. */
void *vector_tail(vector_t *vector){
return vector->items[vector->size-1];
}
/* Return the first item. NOTE(review): no emptiness check — items[0] is
 * stale/uninitialized for an empty vector. */
void *vector_head(vector_t *vector){
return vector->items[0];
}
void vector_free( void *v){
vector_t *vector = v;
if(vector==NULL){return;}
int i;
for(i = 0; i< vector->size;i++){
vector->rmv(vector->items[i]);
}
freeMem(vector->items,vector->limit * sizeof(void *));
freeMem(vector,sizeof(vector_t));
}
/* Install a custom destructor called for each item on removal/clear/free
 * (default is free). */
void vector_set_remove_function(vector_t *vector, void (*rmv)(void *)){
vector->rmv = rmv;
}
/* Split str on any of the delimiter characters into a vector of newly
 * malloc'd NUL-terminated token strings (the vector owns them; freeing
 * the vector frees the tokens). A NULL str yields an empty vector; a
 * NULL/empty delimiter set yields a single copy of the whole string.
 * Consecutive delimiters produce empty tokens; a trailing delimiter does
 * not produce a trailing empty token.
 * NOTE(review): malloc results are not checked before use. */
vector_t *dang_string_tokenize(const char *str, const char *delimiters){
vector_t *tokens = vector_init(VECTOR_VARIABLE_SIZE,8);
if(str == NULL){ return tokens;}
size_t str_size = strlen(str);
if(delimiters == NULL || delimiters[0] == 0){
char * dummy = malloc(str_size*sizeof(char)+1);
strcpy(dummy,str);
vector_soft_put(tokens,dummy);
return tokens;
}
size_t prev = 0;
size_t index = 0;
while(prev < str_size){
/* index = length of the next run of non-delimiter characters. */
index = strcspn(str+prev,delimiters);
char *dummy = malloc((index + 1) * sizeof(char));
memcpy(dummy,str+prev,index);
dummy[index] = 0;
vector_soft_put(tokens,dummy);
/* Skip the token plus the single delimiter that ended it. */
prev+=(1+index);
}
vector_zip(tokens);
return tokens;
}
|
mpm_boundary_rotation_utility.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ \.
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Bodhinanda Chandra
//
#ifndef KRATOS_MPM_BOUNDARY_ROTATION_UTILITY
#define KRATOS_MPM_BOUNDARY_ROTATION_UTILITY
// system includes
// external includes
// kratos includes
#include "includes/define.h"
#include "includes/node.h"
#include "containers/variable.h"
#include "geometries/geometry.h"
#include "utilities/coordinate_transformation_utilities.h"
namespace Kratos {
///@addtogroup ParticleMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/* A utility to rotate the local contributions of certain nodes to the system matrix,
which is required to apply slip conditions (roller-type support) in arbitrary directions to the boundary nodes.*/
template<class TLocalMatrixType, class TLocalVectorType>
class MPMBoundaryRotationUtility: public CoordinateTransformationUtils<TLocalMatrixType,TLocalVectorType,double> {
public:
///@name Type Definitions
///@{
/// Pointer definition of MPMBoundaryRotationUtility
KRATOS_CLASS_POINTER_DEFINITION(MPMBoundaryRotationUtility);
using CoordinateTransformationUtils<TLocalMatrixType,TLocalVectorType,double>::Rotate;
typedef Node<3> NodeType;
typedef Geometry< Node<3> > GeometryType;
///@}
///@name Life Cycle
///@{
/// Constructor.
/** @param DomainSize Number of space dimensions (2 or 3)
* @param NumRowsPerNode Number of matrix or vector rows associated to each node. Displacement DOFs are assumed to be the first mDomainSize rows in each block of rows.
* @param rVariable Kratos variable used to flag nodes where local system contributions will be rotated. All nodes with rVariable != Zero will be rotated.
* @param Zero The zero value for the variable.
*/
MPMBoundaryRotationUtility(
const unsigned int DomainSize,
const unsigned int BlockSize,
const Variable<double>& rVariable):
CoordinateTransformationUtils<TLocalMatrixType,TLocalVectorType,double>(DomainSize,BlockSize,rVariable,0.0), mrFlagVariable(rVariable)
{}
/// Destructor.
~MPMBoundaryRotationUtility() override {}
/// Assignment operator.
/// Intentionally a no-op: mrFlagVariable is a reference member and cannot be
/// reseated. FIX: the body previously fell off the end without returning,
/// which is undefined behavior for a function with a non-void return type.
MPMBoundaryRotationUtility& operator=(MPMBoundaryRotationUtility const& rOther) { return *this; }
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/// Rotate the local system contributions so that they are oriented with each node's normal.
/**
@param rLocalMatrix Local system matrix
@param rLocalVector Local RHS vector
@param rGeometry A reference to the element's (or condition's) geometry
*/
/// Rotate the local system so each slip node's DOFs are expressed in its
/// normal-aligned basis. Dispatches on block size: equal to the domain
/// size means a pure displacement (irreducible) problem; otherwise a
/// mixed formulation with one extra DOF per node is assumed.
void Rotate(
TLocalMatrixType& rLocalMatrix,
TLocalVectorType& rLocalVector,
GeometryType& rGeometry) const override
{
if (this->GetBlockSize() == this->GetDomainSize()) // irreducible case
{
if (this->GetDomainSize() == 2) this->template RotateAuxPure<2>(rLocalMatrix,rLocalVector,rGeometry);
else if (this->GetDomainSize() == 3) this->template RotateAuxPure<3>(rLocalMatrix,rLocalVector,rGeometry);
}
else // mixed formulation case
{
if (this->GetDomainSize() == 2) this->template RotateAux<2,3>(rLocalMatrix,rLocalVector,rGeometry);
else if (this->GetDomainSize() == 3) this->template RotateAux<3,4>(rLocalMatrix,rLocalVector,rGeometry);
}
}
/// RHS only version of Rotate
/// RHS-only version of Rotate: forwards to the base-class vector overload.
void RotateRHS(
TLocalVectorType& rLocalVector,
GeometryType& rGeometry) const
{
this->Rotate(rLocalVector,rGeometry);
}
/// Apply roler type boundary conditions to the rotated local contributions.
/** This function takes the rotated local system contributions so each
node's displacement are expressed using a base oriented with its normal
and imposes that the normal displacement is equal to the mesh displacement in
the normal direction.
*/
/// Impose the roller (slip) constraint on an already-rotated local system:
/// for each slip node, the row/column of its first (normal) DOF is zeroed,
/// the diagonal set to 1, and the RHS entry set to the prescribed normal
/// displacement (normal . DISPLACEMENT of the boundary mesh).
void ApplySlipCondition(TLocalMatrixType& rLocalMatrix,
TLocalVectorType& rLocalVector,
GeometryType& rGeometry) const override
{
const unsigned int LocalSize = rLocalVector.size();
if (LocalSize > 0)
{
for(unsigned int itNode = 0; itNode < rGeometry.PointsNumber(); ++itNode)
{
if(this->IsSlip(rGeometry[itNode]) )
{
// We fix the first displacement dof (normal component) for each rotated block
unsigned int j = itNode * this->GetBlockSize();
// Get the displacement of the boundary mesh, this does not assume that the mesh is moving.
// If the mesh is moving, need to consider the displacement of the moving mesh into account.
const array_1d<double,3> & displacement = rGeometry[itNode].FastGetSolutionStepValue(DISPLACEMENT);
// Get Normal Vector of the boundary
array_1d<double,3> rN = rGeometry[itNode].FastGetSolutionStepValue(NORMAL);
this->Normalize(rN);
// Zero row j and column j, skipping the diagonal term (i,i)==(j,j).
for( unsigned int i = 0; i < j; ++i)// Skip term (i,i)
{
rLocalMatrix(i,j) = 0.0;
rLocalMatrix(j,i) = 0.0;
}
for( unsigned int i = j+1; i < LocalSize; ++i)
{
rLocalMatrix(i,j) = 0.0;
rLocalMatrix(j,i) = 0.0;
}
// Dirichlet value: the mesh displacement projected on the normal.
rLocalVector[j] = inner_prod(rN,displacement);
rLocalMatrix(j,j) = 1.0;
}
}
}
}
/// RHS only version of ApplySlipCondition
/// RHS-only version of ApplySlipCondition: only overwrites the normal-DOF
/// entry of each slip node with the prescribed normal displacement.
void ApplySlipCondition(TLocalVectorType& rLocalVector,
GeometryType& rGeometry) const override
{
if (rLocalVector.size() > 0)
{
for(unsigned int itNode = 0; itNode < rGeometry.PointsNumber(); ++itNode)
{
if( this->IsSlip(rGeometry[itNode]) )
{
// We fix the first momentum dof (normal component) for each rotated block
unsigned int j = itNode * this->GetBlockSize(); // +1
// Get the displacement of the boundary mesh, this does not assume that the mesh is moving.
// If the mesh is moving, need to consider the displacement of the moving mesh into account.
const array_1d<double,3> & displacement = rGeometry[itNode].FastGetSolutionStepValue(DISPLACEMENT);
array_1d<double,3> rN = rGeometry[itNode].FastGetSolutionStepValue(NORMAL);
this->Normalize(rN);
rLocalVector[j] = inner_prod(rN,displacement);
}
}
}
}
// An extra function to distinguish the application of slip in element considering penalty imposition
// Element-side slip application: standard treatment for non-penalty
// geometries; penalty elements are left untouched (handled elsewhere).
void ElementApplySlipCondition(TLocalMatrixType& rLocalMatrix,
TLocalVectorType& rLocalVector,
GeometryType& rGeometry) const
{
// If it is not a penalty element, do as standard
// Otherwise, if it is a penalty element, dont do anything
if (!this->IsPenalty(rGeometry))
{
this->ApplySlipCondition(rLocalMatrix, rLocalVector, rGeometry);
}
}
// An extra function to distinguish the application of slip in element considering penalty imposition (RHS Version)
// RHS-only variant of the element-side slip application (same penalty
// gating as the matrix version).
void ElementApplySlipCondition(TLocalVectorType& rLocalVector,
GeometryType& rGeometry) const
{
// If it is not a penalty element, do as standard
// Otherwise, if it is a penalty element, dont do anything
if (!this->IsPenalty(rGeometry))
{
this->ApplySlipCondition(rLocalVector, rGeometry);
}
}
// An extra function to distinguish the application of slip in condition considering penalty imposition
// Condition-side slip application. Non-penalty geometries get the
// standard treatment; penalty conditions instead keep only the
// normal-component couplings in the LHS (everything else zeroed via a
// temp matrix) and zero the non-normal RHS entries of each slip block.
void ConditionApplySlipCondition(TLocalMatrixType& rLocalMatrix,
TLocalVectorType& rLocalVector,
GeometryType& rGeometry) const
{
// If it is not a penalty condition, do as standard
if (!this->IsPenalty(rGeometry))
{
this->ApplySlipCondition(rLocalMatrix, rLocalVector, rGeometry);
}
// Otherwise, do the following modification
else
{
const unsigned int LocalSize = rLocalVector.size();
if (LocalSize > 0)
{
const unsigned int block_size = this->GetBlockSize();
TLocalMatrixType temp_matrix = ZeroMatrix(rLocalMatrix.size1(),rLocalMatrix.size2());
for(unsigned int itNode = 0; itNode < rGeometry.PointsNumber(); ++itNode)
{
if(this->IsSlip(rGeometry[itNode]) )
{
// We fix the first displacement dof (normal component) for each rotated block
unsigned int j = itNode * block_size;
// Copy all normal value in LHS to the temp_matrix
for (unsigned int i = j; i < rLocalMatrix.size1(); i+= block_size)
{
temp_matrix(i,j) = rLocalMatrix(i,j);
temp_matrix(j,i) = rLocalMatrix(j,i);
}
// Remove all other value in RHS than the normal component
for(unsigned int i = j; i < (j + block_size); ++i)
{
if (i!=j) rLocalVector[i] = 0.0;
}
}
}
// Non-normal LHS entries are discarded by replacing the matrix.
rLocalMatrix = temp_matrix;
}
}
}
// An extra function to distinguish the application of slip in condition considering penalty imposition (RHS Version)
// RHS-only condition-side slip application: standard for non-penalty
// geometries; for penalty conditions, zero every non-normal entry of
// each slip node's block.
void ConditionApplySlipCondition(TLocalVectorType& rLocalVector,
GeometryType& rGeometry) const
{
// If it is not a penalty condition, do as standard
if (!this->IsPenalty(rGeometry))
{
this->ApplySlipCondition(rLocalVector, rGeometry);
}
// Otherwise, if it is a penalty element, dont do anything
else
{
if (rLocalVector.size() > 0)
{
const unsigned int block_size = this->GetBlockSize();
for(unsigned int itNode = 0; itNode < rGeometry.PointsNumber(); ++itNode)
{
if( this->IsSlip(rGeometry[itNode]) )
{
// We fix the first momentum dof (normal component) for each rotated block
unsigned int j = itNode * block_size;
// Remove all other value than the normal component
for(unsigned int i = j; i < (j + block_size); ++i)
{
if (i!=j) rLocalVector[i] = 0.0;
}
}
}
}
}
}
// Checking whether it is normal element or penalty element.
// A geometry is treated as penalty when any of its slip nodes carries a
// flag-variable value strictly greater than 1 (within tolerance).
bool IsPenalty(GeometryType& rGeometry) const
{
bool is_penalty = false;
for(unsigned int itNode = 0; itNode < rGeometry.PointsNumber(); ++itNode)
{
if(this->IsSlip(rGeometry[itNode]) )
{
const double identifier = rGeometry[itNode].FastGetSolutionStepValue(mrFlagVariable);
const double tolerance = 1.e-6;
if (identifier > 1.00 + tolerance)
{
is_penalty = true;
break;
}
}
}
return is_penalty;
}
/// Transform nodal displacement to the rotated coordinates (aligned with each node's normal)
/// The name is kept to be Rotate Velocities, since it is currently a derived class of coordinate_transformation_utilities in the core
/// Rotate each slip node's DISPLACEMENT into its normal-aligned local
/// basis (rotation matrix from LocalRotationOperatorPure). The base-class
/// name "Velocities" is kept; here the rotated variable is DISPLACEMENT.
void RotateVelocities(ModelPart& rModelPart) const override
{
TLocalVectorType displacement(this->GetDomainSize());
TLocalVectorType Tmp(this->GetDomainSize());
ModelPart::NodeIterator it_begin = rModelPart.NodesBegin();
#pragma omp parallel for firstprivate(displacement,Tmp)
for(int iii=0; iii<static_cast<int>(rModelPart.Nodes().size()); iii++)
{
ModelPart::NodeIterator itNode = it_begin+iii;
if( this->IsSlip(*itNode) )
{
//this->RotationOperator<TLocalMatrixType>(Rotation,);
if(this->GetDomainSize() == 3)
{
BoundedMatrix<double,3,3> rRot;
this->LocalRotationOperatorPure(rRot,*itNode);
array_1d<double,3>& rDisplacement = itNode->FastGetSolutionStepValue(DISPLACEMENT);
for(unsigned int i = 0; i < 3; i++) displacement[i] = rDisplacement[i];
noalias(Tmp) = prod(rRot,displacement);
for(unsigned int i = 0; i < 3; i++) rDisplacement[i] = Tmp[i];
}
else
{
BoundedMatrix<double,2,2> rRot;
this->LocalRotationOperatorPure(rRot,*itNode);
array_1d<double,3>& rDisplacement = itNode->FastGetSolutionStepValue(DISPLACEMENT);
for(unsigned int i = 0; i < 2; i++) displacement[i] = rDisplacement[i];
noalias(Tmp) = prod(rRot,displacement);
for(unsigned int i = 0; i < 2; i++) rDisplacement[i] = Tmp[i];
}
}
}
}
/// Transform nodal displacement from the rotated system to the original configuration
/// The name is kept to be Recover Velocities, since it is currently a derived class of coordinate_transformation_utilities in the core
/// Inverse of RotateVelocities: map each slip node's DISPLACEMENT back to
/// the global frame using the transpose of the local rotation operator.
void RecoverVelocities(ModelPart& rModelPart) const override
{
TLocalVectorType displacement(this->GetDomainSize());
TLocalVectorType Tmp(this->GetDomainSize());
ModelPart::NodeIterator it_begin = rModelPart.NodesBegin();
#pragma omp parallel for firstprivate(displacement,Tmp)
for(int iii=0; iii<static_cast<int>(rModelPart.Nodes().size()); iii++)
{
ModelPart::NodeIterator itNode = it_begin+iii;
if( this->IsSlip(*itNode) )
{
if(this->GetDomainSize() == 3)
{
BoundedMatrix<double,3,3> rRot;
this->LocalRotationOperatorPure(rRot,*itNode);
array_1d<double,3>& rDisplacement = itNode->FastGetSolutionStepValue(DISPLACEMENT);
for(unsigned int i = 0; i < 3; i++) displacement[i] = rDisplacement[i];
noalias(Tmp) = prod(trans(rRot),displacement);
for(unsigned int i = 0; i < 3; i++) rDisplacement[i] = Tmp[i];
}
else
{
BoundedMatrix<double,2,2> rRot;
this->LocalRotationOperatorPure(rRot,*itNode);
array_1d<double,3>& rDisplacement = itNode->FastGetSolutionStepValue(DISPLACEMENT);
for(unsigned int i = 0; i < 2; i++) displacement[i] = rDisplacement[i];
noalias(Tmp) = prod(trans(rRot),displacement);
for(unsigned int i = 0; i < 2; i++) rDisplacement[i] = Tmp[i];
}
}
}
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
/// Turn back information as a string (class name only).
std::string Info() const override
{
std::stringstream buffer;
buffer << "MPMBoundaryRotationUtility";
return buffer.str();
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << "MPMBoundaryRotationUtility";
}
/// Print object's data (nothing to print).
void PrintData(std::ostream& rOStream) const override {}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
const Variable<double>& mrFlagVariable;
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
};
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
/// Input stream function (no-op: the utility has no parseable state).
template<class TLocalMatrixType, class TLocalVectorType>
inline std::istream& operator >>(std::istream& rIStream,
MPMBoundaryRotationUtility<TLocalMatrixType, TLocalVectorType>& rThis) {
return rIStream;
}
/// Output stream function: prints the class info followed by its data.
template<class TLocalMatrixType, class TLocalVectorType>
inline std::ostream& operator <<(std::ostream& rOStream,
const MPMBoundaryRotationUtility<TLocalMatrixType, TLocalVectorType>& rThis) {
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
///@} addtogroup block
}
#endif // KRATOS_MPM_BOUNDARY_ROTATION_UTILITY
|
GB_binop__gt_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__gt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__gt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__gt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__gt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_fp64)
// A*D function (colscale): GB (_AxD__gt_fp64)
// D*A function (rowscale): GB (_DxB__gt_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__gt_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__gt_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_fp64)
// C=scalar+B GB (_bind1st__gt_fp64)
// C=scalar+B' GB (_bind1st_tran__gt_fp64)
// C=A+scalar GB (_bind2nd__gt_fp64)
// C=A'+scalar GB (_bind2nd_tran__gt_fp64)
// C type: bool
// A type: double
// B,b type: double
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GT || GxB_NO_FP64 || GxB_NO_GT_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the actual loop lives in the
// included template, specialized by the GB_* macros above.
// NOTE(review): this file is auto-generated from Generator/*; edit the
// generator, not this file.
GrB_Info GB (_Cdense_ewise3_noaccum__gt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B (accumulate sparse into dense). The template body is compiled out
// (#if 0) for this comparator operator, so this is effectively a stub that
// reports success (auto-generated file).
GrB_Info GB (_Cdense_accumB__gt_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__gt_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__gt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C->x holds bool: the GT comparator produces boolean results
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__gt_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C->x holds bool: the GT comparator produces boolean results
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__gt_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix workspace for slicing entries/vectors across tasks;
// released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__gt_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__gt_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is defined 0 above, so only the "#else" branch compiles:
// GT has a flipped form (LT), handled before this kernel is selected.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__gt_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__gt_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__gt_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap Bb (GBB handles Bb == NULL)
if (!GBB (Bb, p)) continue ;
// read B(p); iso flag is false here, so each entry has its own value
double bij = GBX (Bx, p, false) ;
Cx [p] = (x > bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__gt_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap Ab (GBB handles Ab == NULL)
if (!GBB (Ab, p)) continue ;
double aij = GBX (Ax, p, false) ;
Cx [p] = (aij > y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB (_bind1st_tran__gt_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent generated code
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB (_bind2nd_tran__gt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
blake2sp-ref.c | /*
BLAKE2 reference source code package - reference C implementations
Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the
terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
your option. The terms of these licenses can be found at:
- CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
- OpenSSL license : https://www.openssl.org/source/license.html
- Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
More information about the BLAKE2 hash function can be found at
https://blake2.net.
*/
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "blake2.h"
#include "blake2-impl.h"
#define PARALLELISM_DEGREE 8
/*
blake2sp_init_leaf_param defaults to setting the expecting output length
from the digest_length parameter block field.
In some cases, however, we do not want this, as the output length
of these instances is given by inner_length instead.
*/
static int blake2sp_init_leaf_param( blake2s_state *S, const blake2s_param *P )
{
int err = blake2s_init_param(S, P);
/* leaf output length is the inner hash length, not digest_length */
S->outlen = P->inner_length;
return err;
}
/* Initialize one leaf state of the 8-way tree; offset is the lane index. */
static int blake2sp_init_leaf( blake2s_state *S, size_t outlen, size_t keylen, uint64_t offset )
{
blake2s_param P[1];
P->digest_length = (uint8_t)outlen;
P->key_length = (uint8_t)keylen;
/* fixed 8-leaf, depth-2 tree per the BLAKE2sp definition */
P->fanout = PARALLELISM_DEGREE;
P->depth = 2;
store32( &P->leaf_length, 0 );
/* node_offset is a 32-bit field in blake2s; lane index always fits */
store32( &P->node_offset, offset );
store16( &P->xof_length, 0 );
P->node_depth = 0;
P->inner_length = BLAKE2S_OUTBYTES;
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
return blake2sp_init_leaf_param( S, P );
}
/* Initialize the root state that hashes the concatenated leaf digests. */
static int blake2sp_init_root( blake2s_state *S, size_t outlen, size_t keylen )
{
blake2s_param P[1];
P->digest_length = (uint8_t)outlen;
P->key_length = (uint8_t)keylen;
P->fanout = PARALLELISM_DEGREE;
P->depth = 2;
store32( &P->leaf_length, 0 );
store32( &P->node_offset, 0 );
store16( &P->xof_length, 0 );
/* root sits one level above the leaves */
P->node_depth = 1;
P->inner_length = BLAKE2S_OUTBYTES;
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
return blake2s_init_param( S, P );
}
/* Initialize an unkeyed BLAKE2sp state (root + 8 leaves). Returns 0, -1 on bad args. */
int blake2sp_init( blake2sp_state *S, size_t outlen )
{
size_t i;
if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
memset( S->buf, 0, sizeof( S->buf ) );
S->buflen = 0;
S->outlen = outlen;
if( blake2sp_init_root( S->R, outlen, 0 ) < 0 )
return -1;
for( i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2sp_init_leaf( S->S[i], outlen, 0, i ) < 0 ) return -1;
/* mark the root and the final leaf as last nodes of their tree level */
S->R->last_node = 1;
S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
return 0;
}
/* Initialize a keyed BLAKE2sp state; the key is fed as a padded first block to every leaf. */
int blake2sp_init_key( blake2sp_state *S, size_t outlen, const void *key, size_t keylen )
{
size_t i;
if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
if( !key || !keylen || keylen > BLAKE2S_KEYBYTES ) return -1;
memset( S->buf, 0, sizeof( S->buf ) );
S->buflen = 0;
S->outlen = outlen;
if( blake2sp_init_root( S->R, outlen, keylen ) < 0 )
return -1;
for( i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2sp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1;
S->R->last_node = 1;
S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
{
uint8_t block[BLAKE2S_BLOCKBYTES];
memset( block, 0, BLAKE2S_BLOCKBYTES );
memcpy( block, key, keylen );
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S->S[i], block, BLAKE2S_BLOCKBYTES );
secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
}
return 0;
}
/*
* Absorb input into the 8 leaf states. Input is striped across lanes in
* BLAKE2S_BLOCKBYTES units; any tail shorter than one full 8-block stripe
* is carried in S->buf until the next update or final call. Returns 0.
*/
int blake2sp_update( blake2sp_state *S, const void *pin, size_t inlen )
{
const unsigned char * in = (const unsigned char *)pin;
size_t left = S->buflen;
size_t fill = sizeof( S->buf ) - left;
size_t i;
/* complete a buffered stripe first: one block per lane */
if( left && inlen >= fill )
{
memcpy( S->buf + left, in, fill );
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, BLAKE2S_BLOCKBYTES );
in += fill;
inlen -= fill;
left = 0;
}
/* lane i consumes blocks i, i+8, i+16, ... of the remaining input;
with OpenMP the lanes run in parallel, otherwise sequentially */
#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
{
#if defined(_OPENMP)
size_t i = omp_get_thread_num();
#endif
size_t inlen__ = inlen;
const unsigned char *in__ = ( const unsigned char * )in;
in__ += i * BLAKE2S_BLOCKBYTES;
while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
{
blake2s_update( S->S[i], in__, BLAKE2S_BLOCKBYTES );
in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
}
}
/* buffer the remaining partial stripe for later */
in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES );
inlen %= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
if( inlen > 0 )
memcpy( S->buf + left, in, inlen );
S->buflen = left + inlen;
return 0;
}
/*
* Finalize: flush the carry buffer into the leaves, finalize each leaf,
* then hash the 8 leaf digests through the root. Writes S->outlen bytes
* to out; returns the root's blake2s_final result, or -1 on bad args.
*/
int blake2sp_final( blake2sp_state *S, void *out, size_t outlen )
{
uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
size_t i;
if(out == NULL || outlen < S->outlen) {
return -1;
}
/* lane i owns bytes [i*BLOCKBYTES, (i+1)*BLOCKBYTES) of the carry buffer */
for( i = 0; i < PARALLELISM_DEGREE; ++i )
{
if( S->buflen > i * BLAKE2S_BLOCKBYTES )
{
size_t left = S->buflen - i * BLAKE2S_BLOCKBYTES;
if( left > BLAKE2S_BLOCKBYTES ) left = BLAKE2S_BLOCKBYTES;
blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, left );
}
blake2s_final( S->S[i], hash[i], BLAKE2S_OUTBYTES );
}
/* the root hashes the concatenation of all leaf digests */
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S->R, hash[i], BLAKE2S_OUTBYTES );
return blake2s_final( S->R, out, S->outlen );
}
/*
* One-shot BLAKE2sp: hash inlen bytes of in (optionally keyed) into out.
* Equivalent to init(_key) + update + final but with per-lane tail handling
* done inline. Returns 0 on success, -1 on invalid parameters.
*/
int blake2sp( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen )
{
uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
blake2s_state S[PARALLELISM_DEGREE][1];
blake2s_state FS[1];
size_t i;
/* Verify parameters */
if ( NULL == in && inlen > 0 ) return -1;
if ( NULL == out ) return -1;
if ( NULL == key && keylen > 0) return -1;
if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
if( keylen > BLAKE2S_KEYBYTES ) return -1;
for( i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2sp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1;
S[PARALLELISM_DEGREE - 1]->last_node = 1; /* mark last node */
if( keylen > 0 )
{
/* feed the zero-padded key block to every leaf */
uint8_t block[BLAKE2S_BLOCKBYTES];
memset( block, 0, BLAKE2S_BLOCKBYTES );
memcpy( block, key, keylen );
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S[i], block, BLAKE2S_BLOCKBYTES );
secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
}
/* stripe the input round-robin across the 8 lanes (parallel if OpenMP) */
#if defined(_OPENMP)
#pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
{
#if defined(_OPENMP)
size_t i = omp_get_thread_num();
#endif
size_t inlen__ = inlen;
const unsigned char *in__ = ( const unsigned char * )in;
in__ += i * BLAKE2S_BLOCKBYTES;
while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
{
blake2s_update( S[i], in__, BLAKE2S_BLOCKBYTES );
in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
}
/* this lane's share of the final partial stripe, if any */
if( inlen__ > i * BLAKE2S_BLOCKBYTES )
{
const size_t left = inlen__ - i * BLAKE2S_BLOCKBYTES;
const size_t len = left <= BLAKE2S_BLOCKBYTES ? left : BLAKE2S_BLOCKBYTES;
blake2s_update( S[i], in__, len );
}
blake2s_final( S[i], hash[i], BLAKE2S_OUTBYTES );
}
if( blake2sp_init_root( FS, outlen, keylen ) < 0 )
return -1;
FS->last_node = 1;
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( FS, hash[i], BLAKE2S_OUTBYTES );
return blake2s_final( FS, out, outlen );
}
#if defined(BLAKE2SP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
/* Self-test: check the one-shot and streaming APIs against keyed known-answer vectors. */
int main( void )
{
uint8_t key[BLAKE2S_KEYBYTES];
uint8_t buf[BLAKE2_KAT_LENGTH];
size_t i, step;
/* deterministic key and message: bytes 0,1,2,... */
for( i = 0; i < BLAKE2S_KEYBYTES; ++i )
key[i] = ( uint8_t )i;
for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
buf[i] = ( uint8_t )i;
/* Test simple API */
for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
{
uint8_t hash[BLAKE2S_OUTBYTES];
blake2sp( hash, BLAKE2S_OUTBYTES, buf, i, key, BLAKE2S_KEYBYTES );
if( 0 != memcmp( hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES ) )
{
goto fail;
}
}
/* Test streaming API: every chunk size from 1 to one block */
for(step = 1; step < BLAKE2S_BLOCKBYTES; ++step) {
for (i = 0; i < BLAKE2_KAT_LENGTH; ++i) {
uint8_t hash[BLAKE2S_OUTBYTES];
blake2sp_state S;
uint8_t * p = buf;
size_t mlen = i;
int err = 0;
if( (err = blake2sp_init_key(&S, BLAKE2S_OUTBYTES, key, BLAKE2S_KEYBYTES)) < 0 ) {
goto fail;
}
while (mlen >= step) {
if ( (err = blake2sp_update(&S, p, step)) < 0 ) {
goto fail;
}
mlen -= step;
p += step;
}
if ( (err = blake2sp_update(&S, p, mlen)) < 0) {
goto fail;
}
if ( (err = blake2sp_final(&S, hash, BLAKE2S_OUTBYTES)) < 0) {
goto fail;
}
if (0 != memcmp(hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES)) {
goto fail;
}
}
}
puts( "ok" );
return 0;
fail:
puts("error");
return -1;
}
#endif
|
data.h | /*!
* Copyright (c) 2015 by Contributors
* \file data.h
* \brief The input data structure of xgboost.
* \author Tianqi Chen
*/
#ifndef XGBOOST_DATA_H_
#define XGBOOST_DATA_H_
#include <dmlc/base.h>
#include <dmlc/data.h>
#include <rabit/rabit.h>
#include <cstring>
#include <memory>
#include <numeric>
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "./base.h"
#include "../../src/common/span.h"
#include "../../src/common/group_data.h"
#include "../../src/common/host_device_vector.h"
namespace xgboost {
// forward declare learner.
class LearnerImpl;
/*! \brief data type accepted by xgboost interface */
// Values start at 1 (0 is reserved / invalid in the C API) and are stable:
// they are passed across the C interface, so do not renumber.
enum DataType {
kFloat32 = 1,
kDouble = 2,
kUInt32 = 3,
kUInt64 = 4
};
/*!
* \brief Meta information about dataset, always sit in memory.
*/
class MetaInfo {
public:
/*! \brief number of rows in the data */
uint64_t num_row_{0};
/*! \brief number of columns in the data */
uint64_t num_col_{0};
/*! \brief number of nonzero entries in the data */
uint64_t num_nonzero_{0};
/*! \brief label of each instance */
HostDeviceVector<bst_float> labels_;
/*!
* \brief specified root index of each instance,
* can be used for multi task setting
*/
std::vector<bst_uint> root_index_;
/*!
* \brief the index of begin and end of a group
* needed when the learning task is ranking.
*/
std::vector<bst_uint> group_ptr_;
/*! \brief weights of each instance, optional */
HostDeviceVector<bst_float> weights_;
/*!
* \brief initialized margins,
* if specified, xgboost will start from this init margin
* can be used to specify initial prediction to boost from.
*/
HostDeviceVector<bst_float> base_margin_;
/*! \brief version flag, used to check version of this info */
static const int kVersion = 3;
/*! \brief version that contains qid field */
static const int kVersionWithQid = 2;
/*! \brief default constructor */
MetaInfo() = default;
/*!
* \brief Get weight of each instances.
* \param i Instance index.
* \return The weight.
*/
// Returns 1.0f when no weights were provided.
inline bst_float GetWeight(size_t i) const {
return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f;
}
/*!
* \brief Get the root index of i-th instance.
* \param i Instance index.
* \return The pre-defined root index of i-th instance.
*/
// Defaults to root 0 when no root indices were provided.
inline unsigned GetRoot(size_t i) const {
return root_index_.size() != 0 ? root_index_[i] : 0U;
}
/*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */
// Lazily computed and memoized in label_order_cache_; the cache is keyed
// only on size, so it is stale if labels change without resizing.
// NOTE(review): mutation of the mutable cache makes this unsafe to call
// concurrently from multiple threads — confirm callers serialize it.
inline const std::vector<size_t>& LabelAbsSort() const {
if (label_order_cache_.size() == labels_.Size()) {
return label_order_cache_;
}
label_order_cache_.resize(labels_.Size());
std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
const auto& l = labels_.HostVector();
XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
[&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
return label_order_cache_;
}
/*! \brief clear all the information */
void Clear();
/*!
* \brief Load the Meta info from binary stream.
* \param fi The input stream
*/
void LoadBinary(dmlc::Stream* fi);
/*!
* \brief Save the Meta info to binary stream
* \param fo The output stream.
*/
void SaveBinary(dmlc::Stream* fo) const;
/*!
* \brief Set information in the meta info.
* \param key The key of the information.
* \param dptr The data pointer of the source array.
* \param dtype The type of the source data.
* \param num Number of elements in the source array.
*/
void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num);
private:
/*! \brief argsort of labels */
// mutable so the const accessor LabelAbsSort() can fill it lazily
mutable std::vector<size_t> label_order_cache_;
};
/*! \brief Element from a sparse vector */
struct Entry {
/*! \brief feature index */
bst_uint index;
/*! \brief feature value */
bst_float fvalue;
/*! \brief default constructor */
Entry() = default;
/*!
* \brief constructor with index and value
* \param index The feature or row index.
* \param fvalue The feature value.
*/
Entry(bst_uint index, bst_float fvalue) : index(index), fvalue(fvalue) {}
/*! \brief reversely compare feature values */
inline static bool CmpValue(const Entry& a, const Entry& b) {
return a.fvalue < b.fvalue;
}
inline bool operator==(const Entry& other) const {
return (this->index == other.index && this->fvalue == other.fvalue);
}
};
/*!
 * \brief In-memory storage unit of sparse batch, stored in CSR format.
 */
class SparsePage {
public:
// Offset for each row.
HostDeviceVector<size_t> offset;
/*! \brief the data of the segments */
HostDeviceVector<Entry> data;
// index of the first row of this page within the full matrix
size_t base_rowid;
/*! \brief an instance of sparse vector in the batch */
using Inst = common::Span<Entry const>;
/*! \brief get i-th row from the batch */
inline Inst operator[](size_t i) const {
const auto& data_vec = data.HostVector();
const auto& offset_vec = offset.HostVector();
size_t size;
// in distributed mode, some partitions may not get any instance for a feature. Therefore
// we should set the size as zero
if (rabit::IsDistributed() && i + 1 >= offset_vec.size()) {
size = 0;
} else {
size = offset_vec[i + 1] - offset_vec[i];
}
return {data_vec.data() + offset_vec[i],
static_cast<Inst::index_type>(size)};
}
/*! \brief constructor */
SparsePage() {
this->Clear();
}
/*! \return number of instance in the page */
inline size_t Size() const {
return offset.Size() - 1;
}
/*! \return estimation of memory cost of this page */
inline size_t MemCostBytes() const {
return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
}
/*! \brief clear the page */
// resets to a valid empty CSR page: one sentinel offset, no data
inline void Clear() {
base_rowid = 0;
auto& offset_vec = offset.HostVector();
offset_vec.clear();
offset_vec.push_back(0);
data.HostVector().clear();
}
/*!
 * \brief Transpose this page (CSR rows -> columns), parallelized with OpenMP.
 * Pass 1 counts entries per column; pass 2 fills the transposed storage.
 * \param num_columns number of columns of the transposed page
 */
SparsePage GetTranspose(int num_columns) const {
SparsePage transpose;
common::ParallelGroupBuilder<Entry> builder(&transpose.offset.HostVector(),
&transpose.data.HostVector());
const int nthread = omp_get_max_threads();
builder.InitBudget(num_columns, nthread);
long batch_size = static_cast<long>(this->Size()); // NOLINT(*)
#pragma omp parallel for schedule(static)
for (long i = 0; i < batch_size; ++i) { // NOLINT(*)
int tid = omp_get_thread_num();
auto inst = (*this)[i];
for (bst_uint j = 0; j < inst.size(); ++j) {
builder.AddBudget(inst[j].index, tid);
}
}
builder.InitStorage();
#pragma omp parallel for schedule(static)
for (long i = 0; i < batch_size; ++i) { // NOLINT(*)
int tid = omp_get_thread_num();
auto inst = (*this)[i];
for (bst_uint j = 0; j < inst.size(); ++j) {
// transposed entry stores the global row id as its index
builder.Push(
inst[j].index,
Entry(static_cast<bst_uint>(this->base_rowid + i), inst[j].fvalue),
tid);
}
}
return transpose;
}
/*! \brief sort the entries of every row segment by feature value, in parallel */
void SortRows() {
auto ncol = static_cast<bst_omp_uint>(this->Size());
#pragma omp parallel for schedule(dynamic, 1)
for (bst_omp_uint i = 0; i < ncol; ++i) {
if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
std::sort(
this->data.HostVector().begin() + this->offset.HostVector()[i],
this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
Entry::CmpValue);
}
}
}
/*!
 * \brief Push row block into the page.
 * \param batch the row batch.
 */
void Push(const dmlc::RowBlock<uint32_t>& batch);
/*!
 * \brief Push a sparse page
 * \param batch the row page
 */
void Push(const SparsePage &batch);
/*!
 * \brief Push a SparsePage stored in CSC format
 * \param batch The row batch to be pushed
 */
void PushCSC(const SparsePage& batch);
/*!
 * \brief Push one instance into page
 * \param inst an instance row
 */
void Push(const Inst &inst);
// NOTE(review): duplicates the const Size() above with an identical body;
// the non-const overload shadows it for non-const objects and could be
// removed without affecting callers.
size_t Size() { return offset.Size() - 1; }
};
/*! \brief A SparsePage whose storage is interpreted in CSC (column-major) layout. */
class CSCPage : public SparsePage {
 public:
  CSCPage() = default;
  /*! \brief Wrap an existing page, taking over its buffers. */
  explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
/*! \brief A CSC page whose column segments are additionally sorted by value. */
class SortedCSCPage : public SparsePage {
 public:
  SortedCSCPage() = default;
  /*! \brief Wrap an existing page, taking over its buffers. */
  explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
/*!
 * \brief Abstract interface implemented by concrete batch iterators.
 *        Subclasses provide dereference, advance and end-detection.
 * \tparam T the batch/page type yielded by the iterator
 */
template<typename T>
class BatchIteratorImpl {
 public:
  virtual ~BatchIteratorImpl() = default;
  /*! \brief access the current batch */
  virtual T& operator*() = 0;
  /*! \brief access the current batch (const) */
  virtual const T& operator*() const = 0;
  /*! \brief advance to the next batch */
  virtual void operator++() = 0;
  /*! \brief whether the iterator is exhausted */
  virtual bool AtEnd() const = 0;
};
/*!
 * \brief Forward iterator over batches, wrapping a BatchIteratorImpl.
 *        Holds the impl in a shared_ptr, so copies share iteration state.
 */
template<typename T>
class BatchIterator {
public:
using iterator_category = std::forward_iterator_tag;
// Takes ownership of impl (may be nullptr for the end sentinel).
explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); }
void operator++() {
CHECK(impl_ != nullptr);
++(*impl_);
}
T& operator*() {
CHECK(impl_ != nullptr);
return *(*impl_);
}
const T& operator*() const {
CHECK(impl_ != nullptr);
return *(*impl_);
}
// NOTE(review): sentinel-style comparison — rhs is ignored and only this
// iterator's AtEnd() is consulted. Correct for range-for against end(),
// but not a general-purpose inequality.
bool operator!=(const BatchIterator& rhs) const {
CHECK(impl_ != nullptr);
return !impl_->AtEnd();
}
bool AtEnd() const {
CHECK(impl_ != nullptr);
return impl_->AtEnd();
}
private:
std::shared_ptr<BatchIteratorImpl<T>> impl_;
};
/*!
 * \brief A range of batches usable in a range-based for loop.
 *        end() is a null sentinel; iteration stops when begin's impl reports AtEnd().
 * \tparam T the batch/page type
 */
template<typename T>
class BatchSet {
 public:
  /*! \brief construct the range from its first iterator */
  explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(std::move(begin_iter)) {}
  /*! \brief first batch of the range */
  BatchIterator<T> begin() { return begin_iter_; }
  /*! \brief sentinel end iterator */
  BatchIterator<T> end() { return BatchIterator<T>(nullptr); }
 private:
  BatchIterator<T> begin_iter_;
};
/*!
 * \brief This is data structure that user can pass to DMatrix::Create
 * to create a DMatrix for training, user can create this data structure
 * for customized Data Loading on single machine.
 *
 * On distributed setting, usually an customized dmlc::Parser is needed instead.
 */
// Inherits the iteration protocol (BeforeFirst/Next/Value) from dmlc::DataIter<T>.
template<typename T>
class DataSource : public dmlc::DataIter<T> {
public:
/*!
 * \brief Meta information about the dataset
 * The subclass need to be able to load this correctly from data.
 */
MetaInfo info;
};
/*!
 * \brief Internal data structured used by XGBoost during training.
 * There are two ways to create a customized DMatrix that reads in user defined-format.
 *
 * - Provide a dmlc::Parser and pass into the DMatrix::Create
 * - Alternatively, if data can be represented by an URL, define a new dmlc::Parser and register by DMLC_REGISTER_DATA_PARSER;
 * - This works best for user defined data input source, such as data-base, filesystem.
 * - Provide a DataSource, that can be passed to DMatrix::Create
 * This can be used to re-use inmemory data structure into DMatrix.
 */
class DMatrix {
public:
/*! \brief default constructor */
DMatrix() = default;
/*! \brief meta information of the dataset */
virtual MetaInfo& Info() = 0;
/*! \brief meta information of the dataset */
virtual const MetaInfo& Info() const = 0;
/**
 * \brief Gets batches. Use range based for loop over BatchSet to access individual batches.
 */
// Only the specializations below (SparsePage, CSCPage, SortedCSCPage) exist.
template<typename T>
BatchSet<T> GetBatches();
// the following are column meta data, should be able to answer them fast.
/*! \return Whether the data columns single column block. */
virtual bool SingleColBlock() const = 0;
/*! \brief get column density */
virtual float GetColDensity(size_t cidx) = 0;
/*! \brief virtual destructor */
virtual ~DMatrix() = default;
/*!
 * \brief Save DMatrix to local file.
 * The saved file only works for non-sharded dataset(single machine training).
 * This API is deprecated and dis-encouraged to use.
 * \param fname The file name to be saved.
 * \return The created DMatrix.
 */
virtual void SaveToLocalFile(const std::string& fname);
/*!
 * \brief Load DMatrix from URI.
 * \param uri The URI of input.
 * \param silent Whether print information during loading.
 * \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode.
 * \param file_format The format type of the file, used for dmlc::Parser::Create.
 * By default "auto" will be able to load in both local binary file.
 * \param page_size Page size for external memory.
 * \return The created DMatrix.
 */
static DMatrix* Load(const std::string& uri,
bool silent,
bool load_row_split,
const std::string& file_format = "auto",
const size_t page_size = kPageSize);
/*!
 * \brief create a new DMatrix, by wrapping a row_iterator, and meta info.
 * \param source The source iterator of the data, the create function takes ownership of the source.
 * \param cache_prefix The path to prefix of temporary cache file of the DMatrix when used in external memory mode.
 * This can be nullptr for common cases, and in-memory mode will be used.
 * \return a Created DMatrix.
 */
static DMatrix* Create(std::unique_ptr<DataSource<SparsePage>>&& source,
const std::string& cache_prefix = "");
/*!
 * \brief Create a DMatrix by loading data from parser.
 * Parser can later be deleted after the DMatrix i created.
 * \param parser The input data parser
 * \param cache_prefix The path to prefix of temporary cache file of the DMatrix when used in external memory mode.
 * This can be nullptr for common cases, and in-memory mode will be used.
 * \param page_size Page size for external memory.
 * \sa dmlc::Parser
 * \note dmlc-core provides efficient distributed data parser for libsvm format.
 * User can create and register customized parser to load their own format using DMLC_REGISTER_DATA_PARSER.
 * See "dmlc-core/include/dmlc/data.h" for detail.
 * \return A created DMatrix.
 */
static DMatrix* Create(dmlc::Parser<uint32_t>* parser,
const std::string& cache_prefix = "",
const size_t page_size = kPageSize);
/*! \brief page size 32 MB */
static const size_t kPageSize = 32UL << 20UL;
protected:
// Layout-specific batch producers implemented by concrete DMatrix types;
// dispatched to by the GetBatches<T>() specializations below.
virtual BatchSet<SparsePage> GetRowBatches() = 0;
virtual BatchSet<CSCPage> GetColumnBatches() = 0;
virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0;
};
/*! \brief GetBatches dispatch: row-major CSR pages */
template<>
inline BatchSet<SparsePage> DMatrix::GetBatches() {
return GetRowBatches();
}
/*! \brief GetBatches dispatch: column-major CSC pages */
template<>
inline BatchSet<CSCPage> DMatrix::GetBatches() {
return GetColumnBatches();
}
/*! \brief GetBatches dispatch: value-sorted CSC pages */
template<>
inline BatchSet<SortedCSCPage> DMatrix::GetBatches() {
return GetSortedColumnBatches();
}
} // namespace xgboost
namespace dmlc {
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);
}
#endif // XGBOOST_DATA_H_
|
libperf.c | /**
* Copyright (C) Mellanox Technologies Ltd. 2001-2014. ALL RIGHTS RESERVED.
* Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED.
* Copyright (C) The University of Tennessee and The University
* of Tennessee Research Foundation. 2015-2016. ALL RIGHTS RESERVED.
* Copyright (C) ARM Ltd. 2017. ALL RIGHTS RESERVED.
* See file LICENSE for terms.
*/
#include "libperf_int.h"
#include <ucs/debug/log.h>
#include <string.h>
#include <malloc.h>
#include <unistd.h>
/*
 * Endpoint descriptor exchanged between peers during perf-test setup.
 * NOTE(review): presumably serialized and sent to the remote side —
 * confirm against the connection-establishment code.
 */
typedef struct {
/* anonymous union: only one transport's fields are meaningful at a time */
union {
struct {
size_t dev_addr_len;
size_t iface_addr_len;
size_t ep_addr_len;
} uct;
struct {
size_t addr_len;
} ucp;
};
/* size of the packed remote key that follows the addresses */
size_t rkey_size;
/* remote receive-buffer address, carried as an integer */
unsigned long recv_buffer;
} ucx_perf_ep_info_t;
/*
 * This Quickselect routine is based on the algorithm described in
 * "Numerical recipes in C", Second Edition,
 * Cambridge University Press, 1992, Section 8.5, ISBN 0-521-43108-5
 * This code by Nicolas Devillard - 1998. Public domain.
 */
/*
 * Returns arr[(n-1)/2] after partially ordering arr in place (the array is
 * MODIFIED). Average O(n), no extra memory.
 * NOTE(review): the "__" prefix is reserved for the implementation by the C
 * standard; consider renaming (callers elsewhere in this file would need
 * updating).
 */
static ucs_time_t __find_median_quick_select(ucs_time_t arr[], int n)
{
int low, high ;
int median;
int middle, ll, hh;
#define ELEM_SWAP(a,b) { register ucs_time_t t=(a);(a)=(b);(b)=t; }
low = 0 ; high = n-1 ; median = (low + high) / 2;
for (;;) {
if (high <= low) /* One element only */
return arr[median] ;
if (high == low + 1) { /* Two elements only */
if (arr[low] > arr[high])
ELEM_SWAP(arr[low], arr[high]) ;
return arr[median] ;
}
/* Find median of low, middle and high items; swap into position low */
middle = (low + high) / 2;
if (arr[middle] > arr[high]) ELEM_SWAP(arr[middle], arr[high]) ;
if (arr[low] > arr[high]) ELEM_SWAP(arr[low], arr[high]) ;
if (arr[middle] > arr[low]) ELEM_SWAP(arr[middle], arr[low]) ;
/* Swap low item (now in position middle) into position (low+1) */
ELEM_SWAP(arr[middle], arr[low+1]) ;
/* Nibble from each end towards middle, swapping items when stuck */
ll = low + 1;
hh = high;
for (;;) {
do ll++; while (arr[low] > arr[ll]) ;
do hh--; while (arr[hh] > arr[low]) ;
if (hh < ll)
break;
ELEM_SWAP(arr[ll], arr[hh]) ;
}
/* Swap middle item (in position low) back into correct position */
ELEM_SWAP(arr[low], arr[hh]) ;
/* Re-set active partition */
if (hh <= median)
low = ll;
if (hh >= median)
high = hh - 1;
}
}
/* Allocate the send and receive buffers (via the UCT interface allocator)
 * and the per-thread IOV array for a UCT test.
 * Returns UCS_OK on success; on failure, everything allocated so far is
 * released and the failing status is returned.
 * FIX: the IOV allocation failure path previously jumped to err_free_send
 * and leaked the already-allocated receive buffer. */
static ucs_status_t uct_perf_test_alloc_mem(ucx_perf_context_t *perf,
                                            ucx_perf_params_t *params)
{
    ucs_status_t status;
    unsigned flags;
    size_t buffer_size;

    if ((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) && params->iov_stride) {
        buffer_size = params->msg_size_cnt * params->iov_stride;
    } else {
        buffer_size = ucx_perf_get_message_size(params);
    }

    /* TODO use params->alignment */
    flags = (params->flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) ?
            UCT_MD_MEM_FLAG_NONBLOCK : 0;

    /* Allocate send buffer memory */
    status = uct_iface_mem_alloc(perf->uct.iface,
                                 buffer_size * params->thread_count,
                                 flags, "perftest", &perf->uct.send_mem);
    if (status != UCS_OK) {
        ucs_error("Failed allocate send buffer: %s", ucs_status_string(status));
        goto err;
    }
    ucs_assert(perf->uct.send_mem.md == perf->uct.md);
    perf->send_buffer = perf->uct.send_mem.address;

    /* Allocate receive buffer memory */
    status = uct_iface_mem_alloc(perf->uct.iface,
                                 buffer_size * params->thread_count,
                                 flags, "perftest", &perf->uct.recv_mem);
    if (status != UCS_OK) {
        ucs_error("Failed allocate receive buffer: %s", ucs_status_string(status));
        goto err_free_send;
    }
    ucs_assert(perf->uct.recv_mem.md == perf->uct.md);
    perf->recv_buffer = perf->uct.recv_mem.address;

    /* Allocate IOV datatype memory */
    perf->params.msg_size_cnt = params->msg_size_cnt;
    perf->uct.iov = malloc(sizeof(*perf->uct.iov) *
                           perf->params.msg_size_cnt *
                           params->thread_count);
    if (NULL == perf->uct.iov) {
        status = UCS_ERR_NO_MEMORY;
        ucs_error("Failed allocate send IOV(%lu) buffer: %s",
                  perf->params.msg_size_cnt, ucs_status_string(status));
        goto err_free_recv; /* was err_free_send: leaked recv_mem */
    }

    perf->offset = 0;

    ucs_debug("allocated memory. Send buffer %p, Recv buffer %p",
              perf->send_buffer, perf->recv_buffer);
    return UCS_OK;

err_free_recv:
    uct_iface_mem_free(&perf->uct.recv_mem);
err_free_send:
    uct_iface_mem_free(&perf->uct.send_mem);
err:
    return status;
}
/* Release everything obtained by uct_perf_test_alloc_mem(): the IOV array
 * and the send/receive buffers. The three resources are independent, so
 * they are released in reverse order of acquisition. */
static void uct_perf_test_free_mem(ucx_perf_context_t *perf)
{
    free(perf->uct.iov);
    uct_iface_mem_free(&perf->uct.recv_mem);
    uct_iface_mem_free(&perf->uct.send_mem);
}
/* Reset all timing baselines to the current time so elapsed-time and
 * interval reporting both start from "now". */
void ucx_perf_test_start_clock(ucx_perf_context_t *perf)
{
    ucs_time_t now = ucs_get_time();

    perf->start_time = now;
    perf->prev_time  = now;
    perf->prev.time  = now;
}
/* Initialize a perf context from the test parameters: copy the parameters,
 * restart the clock, derive the time/iteration limits, and zero every
 * counter and the latency timing queue. */
static void ucx_perf_test_reset(ucx_perf_context_t *perf,
                                ucx_perf_params_t *params)
{
    ucs_time_t now;
    unsigned slot;

    perf->params     = *params;
    now              = ucs_get_time();
    perf->start_time = now;
    perf->prev_time  = now;

    /* A limit of zero means "no limit" */
    perf->end_time = (perf->params.max_time == 0.0) ? UINT64_MAX :
                     ucs_time_from_sec(perf->params.max_time) + now;
    perf->max_iter = (perf->params.max_iter == 0) ? UINT64_MAX :
                     perf->params.max_iter;

    perf->report_interval = ucs_time_from_sec(perf->params.report_interval);

    perf->current.time  = 0;
    perf->current.msgs  = 0;
    perf->current.bytes = 0;
    perf->current.iters = 0;

    perf->prev.time  = now;
    perf->prev.msgs  = 0;
    perf->prev.bytes = 0;
    perf->prev.iters = 0;

    perf->timing_queue_head = 0;
    perf->offset            = 0;
    for (slot = 0; slot < TIMING_QUEUE_SIZE; ++slot) {
        perf->timing_queue[slot] = 0;
    }
}
/* Fill *result with latency, bandwidth and message-rate figures derived
 * from the counters accumulated in *perf. "moment" averages cover the span
 * since the previous report; "total" averages cover the whole run. */
void ucx_perf_calc_result(ucx_perf_context_t *perf, ucx_perf_result_t *result)
{
    /* Ping-pong measures a round trip, so one-way latency is half of it */
    double factor  = (perf->params.test_type == UCX_PERF_TEST_TYPE_PINGPONG) ?
                     2.0 : 1.0;
    double one_sec = ucs_time_from_sec(1.0);

    result->iters        = perf->current.iters;
    result->bytes        = perf->current.bytes;
    result->elapsed_time = perf->current.time - perf->start_time;

    /* Latency */
    result->latency.typical =
        __find_median_quick_select(perf->timing_queue, TIMING_QUEUE_SIZE)
        / one_sec
        / factor;

    result->latency.moment_average =
        (double)(perf->current.time - perf->prev.time)
        / (perf->current.iters - perf->prev.iters)
        / one_sec
        / factor;

    result->latency.total_average =
        (double)(perf->current.time - perf->start_time)
        / perf->current.iters
        / one_sec
        / factor;

    /* Bandwidth ("typical" has no meaningful definition here) */
    result->bandwidth.typical = 0.0;

    result->bandwidth.moment_average =
        (perf->current.bytes - perf->prev.bytes) * one_sec
        / (double)(perf->current.time - perf->prev.time);

    result->bandwidth.total_average =
        perf->current.bytes * one_sec
        / (double)(perf->current.time - perf->start_time);

    /* Packet rate ("typical" has no meaningful definition here) */
    result->msgrate.typical = 0.0;

    result->msgrate.moment_average =
        (perf->current.msgs - perf->prev.msgs) * one_sec
        / (double)(perf->current.time - perf->prev.time);

    result->msgrate.total_average =
        perf->current.msgs * one_sec
        / (double)(perf->current.time - perf->start_time);
}
/* Validate the API-independent test parameters: non-empty message, at least
 * one outstanding operation, and (when an IOV stride is set) every message
 * size fitting inside the stride. Errors are only printed in verbose mode.
 * FIX: repaired the garbled "max_outstanding, need to be" error text. */
static ucs_status_t ucx_perf_test_check_params(ucx_perf_params_t *params)
{
    size_t it;

    if (ucx_perf_get_message_size(params) < 1) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Message size too small, need to be at least 1");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    if (params->max_outstanding < 1) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("max_outstanding need to be at least 1");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    /* Check that each message size fits into the stride, if one is set */
    if (params->iov_stride) {
        for (it = 0; it < params->msg_size_cnt; ++it) {
            if (params->msg_size_list[it] > params->iov_stride) {
                if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    ucs_error("Buffer size %lu bigger than stride %lu",
                              params->msg_size_list[it], params->iov_stride);
                }
                return UCS_ERR_INVALID_PARAM;
            }
        }
    }
    return UCS_OK;
}
/* Blocking flush: repeatedly issue uct_iface_flush() while driving worker
 * progress, until the flush no longer reports UCS_INPROGRESS. */
void uct_perf_iface_flush_b(ucx_perf_context_t *perf)
{
    ucs_status_t status;

    for (;;) {
        status = uct_iface_flush(perf->uct.iface, 0, NULL);
        uct_worker_progress(perf->uct.worker);
        if (status != UCS_INPROGRESS) {
            break;
        }
    }
}
/* Map a UCT data layout to its capability flag; any other layout (e.g. one
 * with no matching operation) yields 0, which later fails the flags check. */
static inline uint64_t __get_flag(uct_perf_data_layout_t layout, uint64_t short_f,
                                  uint64_t bcopy_f, uint64_t zcopy_f)
{
    switch (layout) {
    case UCT_PERF_DATA_LAYOUT_SHORT:
        return short_f;
    case UCT_PERF_DATA_LAYOUT_BCOPY:
        return bcopy_f;
    case UCT_PERF_DATA_LAYOUT_ZCOPY:
        return zcopy_f;
    default:
        return 0;
    }
}
/* Select the 32- or 64-bit atomic capability flag for the given operand
 * size; any other size is unsupported and yields 0. */
static inline uint64_t __get_atomic_flag(size_t size, uint64_t flag32, uint64_t flag64)
{
    if (size == sizeof(uint32_t)) {
        return flag32;
    }
    if (size == sizeof(uint64_t)) {
        return flag64;
    }
    return 0;
}
/* Map a UCT data layout to its size limit; unsupported layouts yield 0.
 * FIX: zcopy_m was declared uint64_t while the sibling limits are size_t;
 * all three are byte counts and now share the same type. */
static inline size_t __get_max_size(uct_perf_data_layout_t layout, size_t short_m,
                                    size_t bcopy_m, size_t zcopy_m)
{
    return (layout == UCT_PERF_DATA_LAYOUT_SHORT) ? short_m :
           (layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? bcopy_m :
           (layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_m :
           0;
}
/* Verify that the chosen UCT interface can run the requested test: derive
 * the required capability flags and size limits per command/layout, then
 * check message size bounds, AM-specific constraints (header size, flow
 * control window) and the IOV entry count for zero-copy layouts.
 * FIX: corrected the "on-sided" typo in the one-sided-progress warning. */
static ucs_status_t uct_perf_test_check_capabilities(ucx_perf_params_t *params,
                                                     uct_iface_h iface)
{
    uct_iface_attr_t attr;
    ucs_status_t status;
    uint64_t required_flags;
    size_t min_size, max_size, max_iov, message_size;

    status = uct_iface_query(iface, &attr);
    if (status != UCS_OK) {
        return status;
    }

    min_size     = 0;
    max_iov      = 1;
    message_size = ucx_perf_get_message_size(params);
    switch (params->command) {
    case UCX_PERF_CMD_AM:
        required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_AM_SHORT,
                                    UCT_IFACE_FLAG_AM_BCOPY, UCT_IFACE_FLAG_AM_ZCOPY);
        required_flags |= UCT_IFACE_FLAG_AM_CB_SYNC;
        min_size = __get_max_size(params->uct.data_layout, 0, 0,
                                  attr.cap.am.min_zcopy);
        max_size = __get_max_size(params->uct.data_layout, attr.cap.am.max_short,
                                  attr.cap.am.max_bcopy, attr.cap.am.max_zcopy);
        max_iov  = attr.cap.am.max_iov;
        break;
    case UCX_PERF_CMD_PUT:
        required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_PUT_SHORT,
                                    UCT_IFACE_FLAG_PUT_BCOPY, UCT_IFACE_FLAG_PUT_ZCOPY);
        min_size = __get_max_size(params->uct.data_layout, 0, 0,
                                  attr.cap.put.min_zcopy);
        max_size = __get_max_size(params->uct.data_layout, attr.cap.put.max_short,
                                  attr.cap.put.max_bcopy, attr.cap.put.max_zcopy);
        max_iov  = attr.cap.put.max_iov;
        break;
    case UCX_PERF_CMD_GET:
        /* GET has no "short" variant */
        required_flags = __get_flag(params->uct.data_layout, 0,
                                    UCT_IFACE_FLAG_GET_BCOPY, UCT_IFACE_FLAG_GET_ZCOPY);
        min_size = __get_max_size(params->uct.data_layout, 0, 0,
                                  attr.cap.get.min_zcopy);
        max_size = __get_max_size(params->uct.data_layout, 0,
                                  attr.cap.get.max_bcopy, attr.cap.get.max_zcopy);
        max_iov  = attr.cap.get.max_iov;
        break;
    case UCX_PERF_CMD_ADD:
        required_flags = __get_atomic_flag(message_size, UCT_IFACE_FLAG_ATOMIC_ADD32,
                                           UCT_IFACE_FLAG_ATOMIC_ADD64);
        max_size = 8;
        break;
    case UCX_PERF_CMD_FADD:
        required_flags = __get_atomic_flag(message_size, UCT_IFACE_FLAG_ATOMIC_FADD32,
                                           UCT_IFACE_FLAG_ATOMIC_FADD64);
        max_size = 8;
        break;
    case UCX_PERF_CMD_SWAP:
        required_flags = __get_atomic_flag(message_size, UCT_IFACE_FLAG_ATOMIC_SWAP32,
                                           UCT_IFACE_FLAG_ATOMIC_SWAP64);
        max_size = 8;
        break;
    case UCX_PERF_CMD_CSWAP:
        required_flags = __get_atomic_flag(message_size, UCT_IFACE_FLAG_ATOMIC_CSWAP32,
                                           UCT_IFACE_FLAG_ATOMIC_CSWAP64);
        max_size = 8;
        break;
    default:
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Invalid test command");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    status = ucx_perf_test_check_params(params);
    if (status != UCS_OK) {
        return status;
    }

    /* required_flags == 0 means the command/layout combination is invalid
     * (e.g. GET with SHORT layout) */
    if (!ucs_test_all_flags(attr.cap.flags, required_flags) || !required_flags) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Device does not support required operation");
        }
        return UCS_ERR_UNSUPPORTED;
    }

    if (message_size < min_size) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Message size too small");
        }
        return UCS_ERR_UNSUPPORTED;
    }

    if (message_size > max_size) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Message size too big");
        }
        return UCS_ERR_UNSUPPORTED;
    }

    if (params->command == UCX_PERF_CMD_AM) {
        if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_SHORT) &&
            (params->am_hdr_size != sizeof(uint64_t)))
        {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("Short AM header size must be 8 bytes");
            }
            return UCS_ERR_INVALID_PARAM;
        }

        if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_ZCOPY) &&
            (params->am_hdr_size > attr.cap.am.max_hdr))
        {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("AM header size too big");
            }
            return UCS_ERR_UNSUPPORTED;
        }

        if (params->am_hdr_size > message_size) {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("AM header size larger than message size");
            }
            return UCS_ERR_INVALID_PARAM;
        }

        if (params->uct.fc_window > UCT_PERF_TEST_MAX_FC_WINDOW) {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("AM flow-control window too large (should be <= %d)",
                          UCT_PERF_TEST_MAX_FC_WINDOW);
            }
            return UCS_ERR_INVALID_PARAM;
        }

        if ((params->flags & UCX_PERF_TEST_FLAG_ONE_SIDED) &&
            (params->flags & UCX_PERF_TEST_FLAG_VERBOSE))
        {
            ucs_warn("Running active-message test with one-sided progress");
        }
    }

    if (UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) {
        if (params->msg_size_cnt > max_iov) {
            if ((params->flags & UCX_PERF_TEST_FLAG_VERBOSE) ||
                !params->msg_size_cnt) {
                ucs_error("Wrong number of IOV entries. Requested is %lu, "
                          "should be in the range 1...%lu", params->msg_size_cnt,
                          max_iov);
            }
            return UCS_ERR_UNSUPPORTED;
        }
        /* if msg_size_cnt == 1 the message size checked above */
        if ((UCX_PERF_CMD_AM == params->command) && (params->msg_size_cnt > 1)) {
            if (params->am_hdr_size > params->msg_size_list[0]) {
                if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    ucs_error("AM header size (%lu) larger than the first IOV "
                              "message size (%lu)", params->am_hdr_size,
                              params->msg_size_list[0]);
                }
                return UCS_ERR_INVALID_PARAM;
            }
        }
    }
    return UCS_OK;
}
/* Exchange addresses and rkeys with all peers over the RTE and connect a
 * UCT endpoint to each peer. The scratch buffer holds, in order: packed
 * rkey, device address, iface address, EP address (same layout on the
 * remote side). On any failure all partially-created endpoints/rkeys are
 * destroyed and the failing status is returned.
 * FIX: the peers-array calloc failure path previously left 'status' at its
 * last (successful) value and returned UCS_OK; it now reports
 * UCS_ERR_NO_MEMORY. */
static ucs_status_t uct_perf_test_setup_endpoints(ucx_perf_context_t *perf)
{
    const size_t buffer_size = 2048;
    ucx_perf_ep_info_t info, *remote_info;
    unsigned group_size, i, group_index;
    uct_device_addr_t *dev_addr;
    uct_iface_addr_t *iface_addr;
    uct_ep_addr_t *ep_addr;
    uct_iface_attr_t iface_attr;
    uct_md_attr_t md_attr;
    void *rkey_buffer;
    ucs_status_t status;
    struct iovec vec[5];
    void *buffer;
    void *req;

    buffer = malloc(buffer_size);
    if (buffer == NULL) {
        ucs_error("Failed to allocate RTE buffer");
        status = UCS_ERR_NO_MEMORY;
        goto err;
    }

    status = uct_iface_query(perf->uct.iface, &iface_attr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_query: %s", ucs_status_string(status));
        goto err_free;
    }

    status = uct_md_query(perf->uct.md, &md_attr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_md_query: %s", ucs_status_string(status));
        goto err_free;
    }

    /* Only MDs that can allocate/register memory produce packed rkeys */
    if (md_attr.cap.flags & (UCT_MD_FLAG_ALLOC|UCT_MD_FLAG_REG)) {
        info.rkey_size = md_attr.rkey_packed_size;
    } else {
        info.rkey_size = 0;
    }
    info.uct.dev_addr_len   = iface_attr.device_addr_len;
    info.uct.iface_addr_len = iface_attr.iface_addr_len;
    info.uct.ep_addr_len    = iface_attr.ep_addr_len;
    info.recv_buffer        = (uintptr_t)perf->recv_buffer;

    /* Carve the scratch buffer into the regions described above */
    rkey_buffer = buffer;
    dev_addr    = (void*)rkey_buffer + info.rkey_size;
    iface_addr  = (void*)dev_addr    + info.uct.dev_addr_len;
    ep_addr     = (void*)iface_addr  + info.uct.iface_addr_len;
    ucs_assert_always((void*)ep_addr + info.uct.ep_addr_len <= buffer + buffer_size);

    status = uct_iface_get_device_address(perf->uct.iface, dev_addr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_get_device_address: %s",
                  ucs_status_string(status));
        goto err_free;
    }

    if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
        status = uct_iface_get_address(perf->uct.iface, iface_addr);
        if (status != UCS_OK) {
            ucs_error("Failed to uct_iface_get_address: %s", ucs_status_string(status));
            goto err_free;
        }
    }

    if (info.rkey_size > 0) {
        status = uct_md_mkey_pack(perf->uct.md, perf->uct.recv_mem.memh, rkey_buffer);
        if (status != UCS_OK) {
            ucs_error("Failed to uct_rkey_pack: %s", ucs_status_string(status));
            goto err_free;
        }
    }

    group_size  = rte_call(perf, group_size);
    group_index = rte_call(perf, group_index);

    perf->uct.peers = calloc(group_size, sizeof(*perf->uct.peers));
    if (perf->uct.peers == NULL) {
        status = UCS_ERR_NO_MEMORY; /* was missing: returned stale UCS_OK */
        goto err_free;
    }

    if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
        for (i = 0; i < group_size; ++i) {
            if (i == group_index) {
                continue;
            }

            status = uct_ep_create(perf->uct.iface, &perf->uct.peers[i].ep);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_ep_create: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
            status = uct_ep_get_address(perf->uct.peers[i].ep, ep_addr);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_ep_get_address: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
        }
    }

    /* Publish our info header and the packed addresses/rkey to all peers */
    vec[0].iov_base = &info;
    vec[0].iov_len  = sizeof(info);
    vec[1].iov_base = buffer;
    vec[1].iov_len  = info.rkey_size + info.uct.dev_addr_len +
                      info.uct.iface_addr_len + info.uct.ep_addr_len;

    rte_call(perf, post_vec, vec, 2, &req);
    rte_call(perf, exchange_vec, req);

    for (i = 0; i < group_size; ++i) {
        if (i == group_index) {
            continue;
        }

        rte_call(perf, recv, i, buffer, buffer_size, req);

        remote_info = buffer;
        rkey_buffer = remote_info + 1;
        dev_addr    = (void*)rkey_buffer + remote_info->rkey_size;
        iface_addr  = (void*)dev_addr    + remote_info->uct.dev_addr_len;
        ep_addr     = (void*)iface_addr  + remote_info->uct.iface_addr_len;
        perf->uct.peers[i].remote_addr = remote_info->recv_buffer;

        if (remote_info->rkey_size > 0) {
            status = uct_rkey_unpack(rkey_buffer, &perf->uct.peers[i].rkey);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_rkey_unpack: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
        } else {
            perf->uct.peers[i].rkey.handle = NULL;
            perf->uct.peers[i].rkey.type   = NULL;
            perf->uct.peers[i].rkey.rkey   = UCT_INVALID_RKEY;
        }

        if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
            status = uct_ep_connect_to_ep(perf->uct.peers[i].ep, dev_addr, ep_addr);
        } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
            status = uct_ep_create_connected(perf->uct.iface, dev_addr, iface_addr,
                                             &perf->uct.peers[i].ep);
        } else {
            status = UCS_ERR_UNSUPPORTED;
        }
        if (status != UCS_OK) {
            ucs_error("Failed to connect endpoint: %s", ucs_status_string(status));
            goto err_destroy_eps;
        }
    }

    uct_perf_iface_flush_b(perf);
    free(buffer);
    rte_call(perf, barrier);
    return UCS_OK;

err_destroy_eps:
    /* calloc zeroed the array, so only initialized slots are released */
    for (i = 0; i < group_size; ++i) {
        if (perf->uct.peers[i].rkey.type != NULL) {
            uct_rkey_release(&perf->uct.peers[i].rkey);
        }
        if (perf->uct.peers[i].ep != NULL) {
            uct_ep_destroy(perf->uct.peers[i].ep);
        }
    }
    free(perf->uct.peers);
err_free:
    free(buffer);
err:
    return status;
}
/* Disconnect from all peers and release their rkeys and endpoints. The
 * initial barrier keeps any rank from tearing down while others may still
 * be sending; the AM handler is removed before endpoints are destroyed. */
static void uct_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
    unsigned group_size, group_index, i;

    rte_call(perf, barrier);

    uct_iface_set_am_handler(perf->uct.iface, UCT_PERF_TEST_AM_ID, NULL, NULL, UCT_AM_CB_FLAG_SYNC);

    group_size  = rte_call(perf, group_size);
    group_index = rte_call(perf, group_index);

    for (i = 0; i < group_size; ++i) {
        if (i == group_index) {
            continue; /* no self-connection to clean up */
        }
        if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) {
            uct_rkey_release(&perf->uct.peers[i].rkey);
        }
        if (perf->uct.peers[i].ep) {
            uct_ep_destroy(perf->uct.peers[i].ep);
        }
    }
    free(perf->uct.peers);
}
/* Derive the UCP feature bits required by the selected test command and
 * validate the generic parameters. On success *features is set.
 * FIX: message_size was declared as ucs_status_t; it holds the byte count
 * returned by ucx_perf_get_message_size() and must be size_t. */
static ucs_status_t ucp_perf_test_check_params(ucx_perf_params_t *params,
                                               uint64_t *features)
{
    ucs_status_t status;
    size_t message_size;

    message_size = ucx_perf_get_message_size(params);
    switch (params->command) {
    case UCX_PERF_CMD_PUT:
    case UCX_PERF_CMD_GET:
        *features = UCP_FEATURE_RMA;
        break;
    case UCX_PERF_CMD_ADD:
    case UCX_PERF_CMD_FADD:
    case UCX_PERF_CMD_SWAP:
    case UCX_PERF_CMD_CSWAP:
        /* Atomic width is implied by the message size */
        if (message_size == sizeof(uint32_t)) {
            *features = UCP_FEATURE_AMO32;
        } else if (message_size == sizeof(uint64_t)) {
            *features = UCP_FEATURE_AMO64;
        } else {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("Atomic size should be either 32 or 64 bit");
            }
            return UCS_ERR_INVALID_PARAM;
        }
        break;
    case UCX_PERF_CMD_TAG:
        *features = UCP_FEATURE_TAG;
        break;
    default:
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Invalid test command");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    status = ucx_perf_test_check_params(params);
    if (status != UCS_OK) {
        return status;
    }

    return UCS_OK;
}
/* Allocate the per-thread IOV array when the datatype is IOV; for any other
 * datatype this is a no-op and *iov_p is left untouched. */
static ucs_status_t ucp_perf_test_alloc_iov_mem(ucp_perf_datatype_t datatype,
                                                size_t iovcnt, unsigned thread_count,
                                                ucp_dt_iov_t **iov_p)
{
    ucp_dt_iov_t *iov;

    if (UCP_PERF_DATATYPE_IOV != datatype) {
        return UCS_OK;
    }

    iov = malloc(sizeof(*iov) * iovcnt * thread_count);
    if (NULL == iov) {
        ucs_error("Failed allocate IOV buffer with iovcnt=%lu", iovcnt);
        return UCS_ERR_NO_MEMORY;
    }
    *iov_p = iov;
    return UCS_OK;
}
/* Map (and let UCP allocate) the send and receive buffers, then allocate
 * the optional IOV arrays. On failure everything mapped so far is unmapped.
 * FIX: the error exit previously returned UCS_ERR_NO_MEMORY regardless of
 * the actual failure; it now propagates the real status (every goto path
 * sets 'status' before jumping). */
static ucs_status_t ucp_perf_test_alloc_mem(ucx_perf_context_t *perf, ucx_perf_params_t *params)
{
    ucs_status_t status;
    ucp_mem_map_params_t mem_map_params;
    ucp_mem_attr_t mem_attr;
    size_t buffer_size;

    if (params->iov_stride) {
        buffer_size = params->msg_size_cnt * params->iov_stride;
    } else {
        buffer_size = ucx_perf_get_message_size(params);
    }

    /* Allocate send buffer memory (address NULL => UCP chooses it) */
    perf->send_buffer = NULL;

    mem_map_params.field_mask = UCP_MEM_MAP_PARAM_FIELD_ADDRESS |
                                UCP_MEM_MAP_PARAM_FIELD_LENGTH |
                                UCP_MEM_MAP_PARAM_FIELD_FLAGS;
    mem_map_params.address    = perf->send_buffer;
    mem_map_params.length     = buffer_size * params->thread_count;
    mem_map_params.flags      = (params->flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) ?
                                UCP_MEM_MAP_NONBLOCK : 0;
    mem_map_params.flags     |= UCP_MEM_MAP_ALLOCATE;

    status = ucp_mem_map(perf->ucp.context, &mem_map_params,
                         &perf->ucp.send_memh);
    if (status != UCS_OK) {
        goto err;
    }

    mem_attr.field_mask = UCP_MEM_ATTR_FIELD_ADDRESS;
    status = ucp_mem_query(perf->ucp.send_memh, &mem_attr);
    if (status != UCS_OK) {
        goto err;
    }
    perf->send_buffer = mem_attr.address;

    /* Allocate receive buffer memory */
    perf->recv_buffer = NULL;

    mem_map_params.field_mask = UCP_MEM_MAP_PARAM_FIELD_ADDRESS |
                                UCP_MEM_MAP_PARAM_FIELD_LENGTH |
                                UCP_MEM_MAP_PARAM_FIELD_FLAGS;
    mem_map_params.address    = perf->recv_buffer;
    mem_map_params.length     = buffer_size * params->thread_count;
    mem_map_params.flags      = UCP_MEM_MAP_ALLOCATE;

    status = ucp_mem_map(perf->ucp.context, &mem_map_params, &perf->ucp.recv_memh);
    if (status != UCS_OK) {
        goto err_free_send_buffer;
    }

    mem_attr.field_mask = UCP_MEM_ATTR_FIELD_ADDRESS;
    status = ucp_mem_query(perf->ucp.recv_memh, &mem_attr);
    if (status != UCS_OK) {
        goto err_free_send_buffer;
    }
    perf->recv_buffer = mem_attr.address;

    /* Allocate IOV datatype memory */
    perf->params.msg_size_cnt = params->msg_size_cnt;
    perf->ucp.send_iov        = NULL;
    status = ucp_perf_test_alloc_iov_mem(params->ucp.send_datatype, perf->params.msg_size_cnt,
                                         params->thread_count, &perf->ucp.send_iov);
    if (UCS_OK != status) {
        goto err_free_buffers;
    }

    perf->ucp.recv_iov = NULL;
    status = ucp_perf_test_alloc_iov_mem(params->ucp.recv_datatype, perf->params.msg_size_cnt,
                                         params->thread_count, &perf->ucp.recv_iov);
    if (UCS_OK != status) {
        goto err_free_send_iov_buffers;
    }

    return UCS_OK;

err_free_send_iov_buffers:
    free(perf->ucp.send_iov);
err_free_buffers:
    ucp_mem_unmap(perf->ucp.context, perf->ucp.recv_memh);
err_free_send_buffer:
    ucp_mem_unmap(perf->ucp.context, perf->ucp.send_memh);
err:
    return status; /* was: return UCS_ERR_NO_MEMORY (discarded real status) */
}
/* Release everything obtained by ucp_perf_test_alloc_mem(): both mapped
 * buffers and the optional IOV arrays (free(NULL) is a harmless no-op). */
static void ucp_perf_test_free_mem(ucx_perf_context_t *perf)
{
    ucp_mem_unmap(perf->ucp.context, perf->ucp.recv_memh);
    ucp_mem_unmap(perf->ucp.context, perf->ucp.send_memh);
    free(perf->ucp.recv_iov);
    free(perf->ucp.send_iov);
}
/* Destroy every peer's rkey and endpoint, then the peer array itself.
 * Slots zeroed by calloc (self, or peers never connected) are skipped by
 * the NULL checks, so this is safe to call on a partially-built array. */
static void ucp_perf_test_destroy_eps(ucx_perf_context_t* perf,
                                      unsigned group_size)
{
    unsigned idx;

    for (idx = 0; idx < group_size; ++idx) {
        if (perf->ucp.peers[idx].rkey != NULL) {
            ucp_rkey_destroy(perf->ucp.peers[idx].rkey);
        }
        if (perf->ucp.peers[idx].ep != NULL) {
            ucp_ep_destroy(perf->ucp.peers[idx].ep);
        }
    }
    free(perf->ucp.peers);
}
/* All-reduce a status code across the group: post our own status, receive
 * everyone's, and return the last non-OK value seen (UCS_OK if all OK). */
static ucs_status_t ucp_perf_test_exchange_status(ucx_perf_context_t *perf,
                                                  ucs_status_t status)
{
    unsigned group_size        = rte_call(perf, group_size);
    ucs_status_t collective_status = UCS_OK;
    struct iovec vec;
    void *req = NULL;
    unsigned peer;

    vec.iov_base = &status;
    vec.iov_len  = sizeof(status);

    rte_call(perf, post_vec, &vec, 1, &req);
    rte_call(perf, exchange_vec, req);
    for (peer = 0; peer < group_size; ++peer) {
        /* Receives into 'status', overwriting the local value each time */
        rte_call(perf, recv, peer, &status, sizeof(status), req);
        if (status != UCS_OK) {
            collective_status = status;
        }
    }
    return collective_status;
}
/* Exchange worker addresses (and rkeys, when RMA/AMO features are used)
 * with all peers over the RTE and create a UCP endpoint to each peer.
 * The final status is agreed collectively so all ranks fail together.
 * FIXES: (1) the peers array was sized with sizeof(*perf->uct.peers) - the
 * wrong union member - instead of sizeof(*perf->ucp.peers); (2) the calloc
 * failure path left 'status' at its previous (successful) value, so the
 * function could report success after an allocation failure. */
static ucs_status_t ucp_perf_test_setup_endpoints(ucx_perf_context_t *perf,
                                                  uint64_t features)
{
    const size_t buffer_size = 2048;
    ucx_perf_ep_info_t info, *remote_info;
    unsigned group_size, i, group_index;
    ucp_address_t *address;
    size_t address_length = 0;
    ucp_ep_params_t ep_params;
    ucs_status_t status;
    struct iovec vec[3];
    void *rkey_buffer;
    void *req = NULL;
    void *buffer;

    group_size  = rte_call(perf, group_size);
    group_index = rte_call(perf, group_index);

    status = ucp_worker_get_address(perf->ucp.worker, &address, &address_length);
    if (status != UCS_OK) {
        if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("ucp_worker_get_address() failed: %s", ucs_status_string(status));
        }
        goto err;
    }

    info.ucp.addr_len = address_length;
    info.recv_buffer  = (uintptr_t)perf->recv_buffer;

    vec[0].iov_base = &info;
    vec[0].iov_len  = sizeof(info);
    vec[1].iov_base = address;
    vec[1].iov_len  = address_length;

    if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
        /* Pack and publish the rkey only when remote access is required */
        status = ucp_rkey_pack(perf->ucp.context, perf->ucp.recv_memh,
                               &rkey_buffer, &info.rkey_size);
        if (status != UCS_OK) {
            if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("ucp_rkey_pack() failed: %s", ucs_status_string(status));
            }
            ucp_worker_release_address(perf->ucp.worker, address);
            goto err;
        }

        vec[2].iov_base = rkey_buffer;
        vec[2].iov_len  = info.rkey_size;
        rte_call(perf, post_vec, vec, 3, &req);
        ucp_rkey_buffer_release(rkey_buffer);
    } else {
        info.rkey_size = 0;
        rte_call(perf, post_vec, vec, 2, &req);
    }

    ucp_worker_release_address(perf->ucp.worker, address);
    rte_call(perf, exchange_vec, req);

    perf->ucp.peers = calloc(group_size, sizeof(*perf->ucp.peers));
    if (perf->ucp.peers == NULL) {
        status = UCS_ERR_NO_MEMORY; /* was missing: returned stale status */
        goto err;
    }

    buffer = malloc(buffer_size);
    if (buffer == NULL) {
        ucs_error("Failed to allocate RTE receive buffer");
        status = UCS_ERR_NO_MEMORY;
        goto err_destroy_eps;
    }

    for (i = 0; i < group_size; ++i) {
        if (i == group_index) {
            continue;
        }

        rte_call(perf, recv, i, buffer, buffer_size, req);

        /* Wire layout: info header, worker address, then the packed rkey */
        remote_info = buffer;
        address     = (void*)(remote_info + 1);
        rkey_buffer = (void*)address + remote_info->ucp.addr_len;
        perf->ucp.peers[i].remote_addr = remote_info->recv_buffer;

        ep_params.field_mask = UCP_EP_PARAM_FIELD_REMOTE_ADDRESS;
        ep_params.address    = address;

        status = ucp_ep_create(perf->ucp.worker, &ep_params, &perf->ucp.peers[i].ep);
        if (status != UCS_OK) {
            if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("ucp_ep_create() failed: %s", ucs_status_string(status));
            }
            goto err_free_buffer;
        }

        if (remote_info->rkey_size > 0) {
            status = ucp_ep_rkey_unpack(perf->ucp.peers[i].ep, rkey_buffer,
                                        &perf->ucp.peers[i].rkey);
            if (status != UCS_OK) {
                if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    ucs_fatal("ucp_rkey_unpack() failed: %s", ucs_status_string(status));
                }
                goto err_free_buffer;
            }
        } else {
            perf->ucp.peers[i].rkey = NULL;
        }
    }

    free(buffer);

    status = ucp_perf_test_exchange_status(perf, UCS_OK);
    if (status != UCS_OK) {
        ucp_perf_test_destroy_eps(perf, group_size);
    }
    return status;

err_free_buffer:
    free(buffer);
err_destroy_eps:
    ucp_perf_test_destroy_eps(perf, group_size);
err:
    /* Let the peers know we failed so everyone aborts consistently */
    (void)ucp_perf_test_exchange_status(perf, status);
    return status;
}
/* Synchronize with all peers, then tear down every endpoint and rkey. The
 * barrier guarantees no rank destroys endpoints while traffic is pending. */
static void ucp_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
    rte_call(perf, barrier);
    ucp_perf_test_destroy_eps(perf, rte_call(perf, group_size));
}
/* Configure the context for the warmup phase: iterate at most warmup_iter
 * times (capped at 10% of the real iteration budget) and suppress periodic
 * reporting. */
static void ucx_perf_set_warmup(ucx_perf_context_t* perf, ucx_perf_params_t* params)
{
    perf->report_interval = -1;
    perf->max_iter        = ucs_min(params->warmup_iter, params->max_iter / 10);
}
/* Open the memory domain (MD) that exposes the transport/device pair
 * requested in perf->params.uct. Iterates over all MD resources, opening
 * each and scanning its transport resources; the first MD matching both
 * tl_name and dev_name is kept open in perf->uct.md, every other MD and all
 * resource lists are released before returning. */
static ucs_status_t uct_perf_create_md(ucx_perf_context_t *perf)
{
    uct_md_resource_desc_t *md_resources;
    uct_tl_resource_desc_t *tl_resources;
    unsigned i, num_md_resources;
    unsigned j, num_tl_resources;
    ucs_status_t status;
    uct_md_h md;
    uct_md_config_t *md_config;

    status = uct_query_md_resources(&md_resources, &num_md_resources);
    if (status != UCS_OK) {
        goto out;
    }

    for (i = 0; i < num_md_resources; ++i) {
        status = uct_md_config_read(md_resources[i].md_name, NULL, NULL, &md_config);
        if (status != UCS_OK) {
            goto out_release_md_resources;
        }

        status = uct_md_open(md_resources[i].md_name, md_config, &md);
        uct_config_release(md_config);
        if (status != UCS_OK) {
            goto out_release_md_resources;
        }

        status = uct_md_query_tl_resources(md, &tl_resources, &num_tl_resources);
        if (status != UCS_OK) {
            uct_md_close(md);
            goto out_release_md_resources;
        }

        for (j = 0; j < num_tl_resources; ++j) {
            if (!strcmp(perf->params.uct.tl_name, tl_resources[j].tl_name) &&
                !strcmp(perf->params.uct.dev_name, tl_resources[j].dev_name))
            {
                /* Match found - keep this MD open and stop searching */
                uct_release_tl_resource_list(tl_resources);
                perf->uct.md = md;
                status = UCS_OK;
                goto out_release_md_resources;
            }
        }

        /* No match on this MD - close it and try the next one */
        uct_md_close(md);
        uct_release_tl_resource_list(tl_resources);
    }

    ucs_error("Cannot use transport %s on device %s", perf->params.uct.tl_name,
              perf->params.uct.dev_name);
    status = UCS_ERR_NO_DEVICE;

out_release_md_resources:
    uct_release_md_resource_list(md_resources);
out:
    return status;
}
/* Bring up the full UCT stack for one test run: async context, worker,
 * memory domain, interface (config read from the environment), then
 * capability checks, buffer allocation and endpoint wiring. On any failure
 * the goto chain unwinds exactly the resources created so far. */
static ucs_status_t uct_perf_setup(ucx_perf_context_t *perf, ucx_perf_params_t *params)
{
    uct_iface_config_t *iface_config;
    ucs_status_t status;
    uct_iface_params_t iface_params = {
        .tl_name     = params->uct.tl_name,
        .dev_name    = params->uct.dev_name,
        .stats_root  = NULL,
        .rx_headroom = 0
    };
    UCS_CPU_ZERO(&iface_params.cpu_mask);

    status = ucs_async_context_init(&perf->uct.async, params->async_mode);
    if (status != UCS_OK) {
        goto out;
    }

    status = uct_worker_create(&perf->uct.async, params->thread_mode,
                               &perf->uct.worker);
    if (status != UCS_OK) {
        goto out_cleanup_async;
    }

    status = uct_perf_create_md(perf);
    if (status != UCS_OK) {
        goto out_destroy_worker;
    }

    status = uct_iface_config_read(params->uct.tl_name, NULL, NULL, &iface_config);
    if (status != UCS_OK) {
        goto out_destroy_md;
    }

    status = uct_iface_open(perf->uct.md, perf->uct.worker, &iface_params,
                            iface_config, &perf->uct.iface);
    uct_config_release(iface_config);
    if (status != UCS_OK) {
        ucs_error("Failed to open iface: %s", ucs_status_string(status));
        goto out_destroy_md;
    }

    status = uct_perf_test_check_capabilities(params, perf->uct.iface);
    if (status != UCS_OK) {
        goto out_iface_close;
    }

    status = uct_perf_test_alloc_mem(perf, params);
    if (status != UCS_OK) {
        goto out_iface_close;
    }

    status = uct_perf_test_setup_endpoints(perf);
    if (status != UCS_OK) {
        ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
        goto out_free_mem;
    }

    return UCS_OK;

out_free_mem:
    uct_perf_test_free_mem(perf);
out_iface_close:
    uct_iface_close(perf->uct.iface);
out_destroy_md:
    uct_md_close(perf->uct.md);
out_destroy_worker:
    uct_worker_destroy(perf->uct.worker);
out_cleanup_async:
    ucs_async_context_cleanup(&perf->uct.async);
out:
    return status;
}
/* Tear down a UCT test in the reverse order of uct_perf_setup():
 * endpoints, buffers, interface, MD, worker, async context. The order
 * mirrors the setup goto chain and must not be changed. */
static void uct_perf_cleanup(ucx_perf_context_t *perf)
{
    uct_perf_test_cleanup_endpoints(perf);
    uct_perf_test_free_mem(perf);
    uct_iface_close(perf->uct.iface);
    uct_md_close(perf->uct.md);
    uct_worker_destroy(perf->uct.worker);
    ucs_async_context_cleanup(&perf->uct.async);
}
/* Bring up the UCP stack for one test run: derive the required features,
 * read the config, create a context and worker, allocate buffers and wire
 * endpoints. On failure, the goto chain unwinds the resources created so
 * far. FIX: corrected the "alocate" typo in the allocation warning. */
static ucs_status_t ucp_perf_setup(ucx_perf_context_t *perf, ucx_perf_params_t *params)
{
    ucp_params_t ucp_params;
    ucp_worker_params_t worker_params;
    ucp_config_t *config;
    ucs_status_t status;
    uint64_t features;

    status = ucp_perf_test_check_params(params, &features);
    if (status != UCS_OK) {
        goto err;
    }

    status = ucp_config_read(NULL, NULL, &config);
    if (status != UCS_OK) {
        goto err;
    }

    ucp_params.field_mask = UCP_PARAM_FIELD_FEATURES;
    ucp_params.features   = features;

    status = ucp_init(&ucp_params, config, &perf->ucp.context);
    ucp_config_release(config);
    if (status != UCS_OK) {
        goto err;
    }

    worker_params.field_mask  = UCP_WORKER_PARAM_FIELD_THREAD_MODE;
    worker_params.thread_mode = params->thread_mode;

    status = ucp_worker_create(perf->ucp.context, &worker_params,
                               &perf->ucp.worker);
    if (status != UCS_OK) {
        goto err_cleanup;
    }

    status = ucp_perf_test_alloc_mem(perf, params);
    if (status != UCS_OK) {
        ucs_warn("ucp test failed to allocate memory");
        goto err_destroy_worker;
    }

    status = ucp_perf_test_setup_endpoints(perf, features);
    if (status != UCS_OK) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
        }
        goto err_free_mem;
    }

    return UCS_OK;

err_free_mem:
    ucp_perf_test_free_mem(perf);
err_destroy_worker:
    ucp_worker_destroy(perf->ucp.worker);
err_cleanup:
    ucp_cleanup(perf->ucp.context);
err:
    return status;
}
/* Tear down a UCP test in reverse order of ucp_perf_setup(): endpoints,
 * a barrier to let all ranks finish disconnecting, then buffers, worker
 * and context. */
static void ucp_perf_cleanup(ucx_perf_context_t *perf)
{
    ucp_perf_test_cleanup_endpoints(perf);
    rte_call(perf, barrier);
    ucp_perf_test_free_mem(perf);
    ucp_worker_destroy(perf->ucp.worker);
    ucp_cleanup(perf->ucp.context);
}
/* Dispatch table mapping the selected test API (UCT or UCP) to its
 * setup/cleanup/run implementations; indexed by ucx_perf_params_t::api. */
static struct {
    ucs_status_t (*setup)(ucx_perf_context_t *perf, ucx_perf_params_t *params);
    void         (*cleanup)(ucx_perf_context_t *perf);
    ucs_status_t (*run)(ucx_perf_context_t *perf);
} ucx_perf_funcs[] = {
    [UCX_PERF_API_UCT] = {uct_perf_setup, uct_perf_cleanup, uct_perf_test_dispatch},
    [UCX_PERF_API_UCP] = {ucp_perf_setup, ucp_perf_cleanup, ucp_perf_test_dispatch}
};
static int ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result);
/*
 * Top-level perf test driver: validates the requested command and API,
 * sets up the chosen transport (UCT or UCP) through the dispatch table,
 * optionally runs a warmup pass, then runs the measured test and reports
 * the result through the RTE callbacks.  In multi-threaded mode the whole
 * run is delegated to ucx_perf_thread_spawn().
 */
ucs_status_t ucx_perf_run(ucx_perf_params_t *params, ucx_perf_result_t *result)
{
    ucx_perf_context_t *perf;
    ucs_status_t status;

    if (params->command == UCX_PERF_CMD_LAST) {
        ucs_error("Test is not selected");
        status = UCS_ERR_INVALID_PARAM;
        goto out;
    }

    if ((params->api != UCX_PERF_API_UCT) && (params->api != UCX_PERF_API_UCP)) {
        ucs_error("Invalid test API parameter (should be UCT or UCP)");
        status = UCS_ERR_INVALID_PARAM;
        goto out;
    }

    perf = malloc(sizeof(*perf));
    if (perf == NULL) {
        status = UCS_ERR_NO_MEMORY;
        goto out;
    }

    ucx_perf_test_reset(perf, params);

    status = ucx_perf_funcs[params->api].setup(perf, params);
    if (status != UCS_OK) {
        goto out_free;
    }

    if (UCS_THREAD_MODE_SINGLE == params->thread_mode) {
        if (params->warmup_iter > 0) {
            /* Warmup pass: same body as the real test, counters are
             * discarded by the reset below */
            ucx_perf_set_warmup(perf, params);
            status = ucx_perf_funcs[params->api].run(perf);
            if (status != UCS_OK) {
                goto out_cleanup;
            }
            rte_call(perf, barrier);
            ucx_perf_test_reset(perf, params);
        }

        /* Run test */
        status = ucx_perf_funcs[params->api].run(perf);
        rte_call(perf, barrier);
        if (status == UCS_OK) {
            ucx_perf_calc_result(perf, result);
            rte_call(perf, report, result, perf->params.report_arg, 1);
        }
    } else {
        status = ucx_perf_thread_spawn(perf, result);
    }

out_cleanup:
    /* Reached by fallthrough on success as well as by the warmup error path */
    ucx_perf_funcs[params->api].cleanup(perf);
out_free:
    free(perf);
out:
    return status;
}
#if _OPENMP
/* multiple threads sharing the same worker/iface */
#include <omp.h>
/* Per-thread state for the OpenMP test mode. */
typedef struct {
    pthread_t pt;
    int tid;                   /* this thread's index */
    int ntid;                  /* total number of threads */
    ucs_status_t* statuses;    /* shared array: one status slot per thread */
    ucx_perf_context_t perf;   /* private copy of the test context */
    ucx_perf_result_t result;
} ucx_perf_thread_context_t;

/*
 * Per-thread test body (OpenMP mode).  Each thread runs the full test on
 * its private perf context; the shared statuses[] array lets every thread
 * see whether any peer thread failed, so they all bail out together.
 * Returns a pointer to this thread's status slot.
 */
static void* ucx_perf_thread_run_test(void* arg)
{
    ucx_perf_thread_context_t* tctx = (ucx_perf_thread_context_t*) arg;
    ucx_perf_result_t* result = &tctx->result;
    ucx_perf_context_t* perf = &tctx->perf;
    ucx_perf_params_t* params = &perf->params;
    ucs_status_t* statuses = tctx->statuses;
    int tid = tctx->tid;
    int i;

    if (params->warmup_iter > 0) {
        ucx_perf_set_warmup(perf, params);
        statuses[tid] = ucx_perf_funcs[params->api].run(perf);
        rte_call(perf, barrier);
        /* abort early if any thread failed the warmup */
        for (i = 0; i < tctx->ntid; i++) {
            if (UCS_OK != statuses[i]) {
                goto out;
            }
        }
#pragma omp master
        ucx_perf_test_reset(perf, params);
    }

    /* Run test */
#pragma omp barrier
    statuses[tid] = ucx_perf_funcs[params->api].run(perf);
    rte_call(perf, barrier);
    for (i = 0; i < tctx->ntid; i++) {
        if (UCS_OK != statuses[i]) {
            goto out;
        }
    }
#pragma omp master
    {
        /* Assuming all threads are fairly treated, reporting only tid==0
           TODO: aggregate reports */
        ucx_perf_calc_result(perf, result);
        rte_call(perf, report, result, perf->params.report_arg, 1);
    }

out:
    return &statuses[tid];
}
/*
 * Run the test with params->thread_count OpenMP threads.  Each thread gets
 * a private copy of *perf whose buffers are offset by tid * message_size so
 * threads operate on disjoint memory regions.  Returns UCS_OK only if every
 * thread succeeded; otherwise the status of the last failing thread.
 */
static int ucx_perf_thread_spawn(ucx_perf_context_t *perf,
                                 ucx_perf_result_t* result)
{
    ucx_perf_thread_context_t* tctx;
    ucs_status_t* statuses;
    size_t message_size;
    ucs_status_t status;
    int ti, nti;

    message_size = ucx_perf_get_message_size(&perf->params);
    omp_set_num_threads(perf->params.thread_count);
    nti = perf->params.thread_count;

    tctx     = calloc(nti, sizeof(ucx_perf_thread_context_t));
    statuses = calloc(nti, sizeof(ucs_status_t));
    if ((tctx == NULL) || (statuses == NULL)) {
        status = UCS_ERR_NO_MEMORY;
        goto out_free;   /* free(NULL) is a no-op, safe for the one that succeeded */
    }

#pragma omp parallel private(ti)
    {
        ti = omp_get_thread_num();
        tctx[ti].tid = ti;
        tctx[ti].ntid = nti;
        tctx[ti].statuses = statuses;
        tctx[ti].perf = *perf;
        /* Doctor the src and dst buffers to make them thread specific */
        tctx[ti].perf.send_buffer += ti * message_size;
        tctx[ti].perf.recv_buffer += ti * message_size;
        tctx[ti].perf.offset = ti * message_size;
        ucx_perf_thread_run_test((void*)&tctx[ti]);
    }

    /* Collect per-thread statuses; report every failure, return the last */
    status = UCS_OK;
    for (ti = 0; ti < nti; ti++) {
        if (UCS_OK != statuses[ti]) {
            ucs_error("Thread %d failed to run test: %s", tctx[ti].tid,
                      ucs_status_string(statuses[ti]));
            status = statuses[ti];
        }
    }

out_free:
    free(statuses);
    free(tctx);
    return status;
}
#else
/* Stub used when compiled without OpenMP: multi-threaded test mode is
 * unsupported, so reject the request. */
static int ucx_perf_thread_spawn(ucx_perf_context_t *perf,
                                 ucx_perf_result_t* result) {
    ucs_error("Invalid test parameter (thread mode requested without OpenMP capabilities)");
    return UCS_ERR_INVALID_PARAM;
}
#endif /* _OPENMP */
|
McBuilder.h | #pragma once
#include <algorithm>
#include "../../../DataStructures/RAPTOR/Data.h"
#include "../../../Helpers/MultiThreading.h"
#include "../../../Helpers/Timer.h"
#include "../../../Helpers/Console/Progress.h"
#include "McShortcutSearch.h"
namespace RAPTOR::ULTRA {
// Builds the ULTRA shortcut graph for multicriteria (Mc) RAPTOR: one
// McShortcutSearch per stop, executed in parallel, with the per-thread
// result graphs merged into a single DynamicTransferGraph.
template<bool DEBUG = false, bool USE_TIEBREAKING_KEY = true>
class McBuilder {

public:
    inline static constexpr bool Debug = DEBUG;
    inline static constexpr bool UseTiebreakingKey = USE_TIEBREAKING_KEY;
    using Type = McBuilder<Debug, UseTiebreakingKey>;

public:
    // Adds one vertex per stop to the (initially edge-less) shortcut graph
    // and copies each stop's coordinates from the full transfer graph.
    McBuilder(const Data& data) :
        data(data) {
        shortcutGraph.addVertices(data.numberOfStops());
        for (const Vertex vertex : shortcutGraph.vertices()) {
            shortcutGraph.set(Coordinates, vertex, data.transferGraph.get(Coordinates, vertex));
        }
    }

    // Computes shortcuts for all stops with departures in
    // [minDepartureTime, maxDepartureTime].  Every OpenMP thread runs its
    // searches against a private copy of the shortcut graph; copies are
    // merged one thread at a time inside the critical section.  Edges found
    // by several threads must agree on TravelTime (asserted).
    void computeShortcuts(const ThreadPinning& threadPinning, const int witnessTransferLimit = 0, const int minDepartureTime = -never, const int maxDepartureTime = never, const bool verbose = true) noexcept {
        if (verbose) std::cout << "Computing shortcuts with " << threadPinning.numberOfThreads << " threads." << std::endl;
        Progress progress(data.numberOfStops(), verbose);
        omp_set_num_threads(threadPinning.numberOfThreads);
        #pragma omp parallel
        {
            threadPinning.pinThread();

            // Thread-local working copy; searches record shortcuts here.
            DynamicTransferGraph localShortcutGraph = shortcutGraph;
            McShortcutSearch<Debug, UseTiebreakingKey> shortcutSearch(data, localShortcutGraph, witnessTransferLimit);

            #pragma omp for schedule(dynamic)
            for (size_t i = 0; i < data.numberOfStops(); i++) {
                shortcutSearch.run(StopId(i), minDepartureTime, maxDepartureTime);
                progress++;
            }

            // Merge the thread-local shortcuts into the shared graph.
            #pragma omp critical
            {
                for (const Vertex from : shortcutGraph.vertices()) {
                    for (const Edge edge : localShortcutGraph.edgesFrom(from)) {
                        const Vertex to = localShortcutGraph.get(ToVertex, edge);
                        if (!shortcutGraph.hasEdge(from, to)) {
                            shortcutGraph.addEdge(from, to).set(TravelTime, localShortcutGraph.get(TravelTime, edge));
                        } else {
                            AssertMsg(shortcutGraph.get(TravelTime, shortcutGraph.findEdge(from, to)) == localShortcutGraph.get(TravelTime, edge), "Edge from " << from << " to " << to << " has inconclusive travel time (" << shortcutGraph.get(TravelTime, shortcutGraph.findEdge(from, to)) << ", " << localShortcutGraph.get(TravelTime, edge) << ")");
                        }
                    }
                }
            }
        }
        progress.finished();
    }

    // Read-only access to the computed shortcut graph.
    inline const DynamicTransferGraph& getShortcutGraph() const noexcept {
        return shortcutGraph;
    }

    // Mutable access to the computed shortcut graph.
    inline DynamicTransferGraph& getShortcutGraph() noexcept {
        return shortcutGraph;
    }

private:
    const Data& data;                    // input network data
    DynamicTransferGraph shortcutGraph;  // result: shortcut edges between stops
};
}
|
kmp_dispatch_buf_range.c | // RUN: %libomp-compile
// RUN: env KMP_DISP_NUM_BUFFERS=0 %libomp-run 2>&1 | FileCheck --check-prefix=SMALL %s
// RUN: env KMP_DISP_NUM_BUFFERS=4097 %libomp-run 2>&1 | FileCheck --check-prefix=LARGE %s
// SMALL: OMP: Warning
// SMALL-SAME: KMP_DISP_NUM_BUFFERS
// SMALL-SAME: too small
// LARGE: OMP: Warning
// LARGE-SAME: KMP_DISP_NUM_BUFFERS
// LARGE-SAME: too large
#include <stdio.h>
#include <stdlib.h>
/* Trip one parallel-for so the runtime parses KMP_DISP_NUM_BUFFERS and
 * emits the expected warning for out-of-range values (the RUN lines above
 * check the warning text with FileCheck). */
int main() {
#pragma omp parallel for
  for (int i = 0; i < 1000; i++) {
  }
  return EXIT_SUCCESS;
}
|
bitmap.h | /*!
* Copyright 2014 by Contributors
* \file bitmap.h
* \brief a simple implement of bitmap
* NOTE: bitmap is only threadsafe per word access, remember this when using bitmap
* \author Tianqi Chen
*/
#ifndef XGBOOST_COMMON_BITMAP_H_
#define XGBOOST_COMMON_BITMAP_H_
#include "dmlc/omp.h"
#include <vector>
namespace xgboost {
namespace common {
/*! \brief bit map that contains set of bit indicators */
struct BitMap {
  /*! \brief internal data structure; each uint32_t word holds 32 indicator bits */
  std::vector<uint32_t> data;
  /*!
   * \brief resize the bitmap to be certain size
   * \param size the size of bitmap, in bits
   */
  inline void Resize(size_t size) {
    data.resize((size + 31U) >> 5, 0);
  }
  /*!
   * \brief query the i-th position of bitmap
   * \param i the position in
   */
  inline bool Get(size_t i) const {
    return (data[i >> 5] >> (i & 31U)) & 1U;
  }
  /*!
   * \brief set i-th position to true
   * \param i position index
   *
   * Fix: use an unsigned literal for the shift; `1 << 31` on a signed int
   * is undefined behavior.
   */
  inline void SetTrue(size_t i) {
    data[i >> 5] |= (1U << (i & 31U));
  }
  /*! \brief initialize the value of bit map from vector of bool*/
  inline void InitFromBool(const std::vector<int>& vec) {
    this->Resize(vec.size());
    // parallel over the full 32-bit words
    bst_omp_uint nsize = static_cast<bst_omp_uint>(vec.size() / 32);
    #pragma omp parallel for schedule(static)
    for (bst_omp_uint i = 0; i < nsize; ++i) {
      uint32_t res = 0;
      for (int k = 0; k < 32; ++k) {
        // build the bit in unsigned arithmetic: `bit << 31` on int is UB
        uint32_t bit = vec[(i << 5) | k] ? 1U : 0U;
        res |= (bit << k);
      }
      data[i] = res;
    }
    // Handle the partial trailing word, if any.  `full` is the number of
    // bits already written by the word loop above.  (The previous code
    // compared `nsize` to vec.size() and started the tail loop at `nsize`
    // instead of `nsize * 32`, which cleared a fully-written last word and
    // lost bit 0 when the size was exactly 32, besides rescanning almost
    // the whole vector.)
    const size_t full = static_cast<size_t>(nsize) << 5;
    if (full != vec.size()) data.back() = 0;  // clear stale bits on reuse
    for (size_t i = full; i < vec.size(); ++i) {
      if (vec[i]) this->SetTrue(i);
    }
  }
  /*! \brief clear the bitmap, set all places to false */
  inline void Clear() {
    std::fill(data.begin(), data.end(), 0U);
  }
};
} // namespace common
} // namespace xgboost
#endif // XGBOOST_COMMON_BITMAP_H_
|
analyze.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% AAA N N AAA L Y Y ZZZZZ EEEEE %
% A A NN N A A L Y Y ZZ E %
% AAAAA N N N AAAAA L Y ZZZ EEE %
% A A N NN A A L Y ZZ E %
% A A N N A A LLLLL Y ZZZZZ EEEEE %
% %
% Analyze An Image %
% %
% Software Design %
% Bill Corbis %
% December 1998 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
*/
/*
Include declarations.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <assert.h>
#include <math.h>
#include "magick/MagickCore.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% a n a l y z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% analyzeImage() computes the brightness and saturation mean, standard
% deviation, kurtosis and skewness and stores these values as attributes
% of the image.
%
% The format of the analyzeImage method is:
%
% size_t analyzeImage(Image *images,const int argc,
% char **argv,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the address of a structure of type Image.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
 * Compute per-frame brightness and saturation statistics (mean, standard
 * deviation, kurtosis, skewness) and record them as "filter:..." image
 * properties (see the method comment above for the full contract).
 *
 * Fix: the OpenMP loop previously accumulated into shared sum variables and
 * shared hue/saturation/brightness temporaries -- a data race that corrupts
 * the statistics whenever more than one thread runs.  The temporaries are
 * now per-row locals and the sums are combined with a reduction clause.
 */
ModuleExport size_t analyzeImage(Image **images,const int argc,
  const char **argv,ExceptionInfo *exception)
{
  char
    text[MaxTextExtent];

  double
    area,
    brightness_mean,
    brightness_standard_deviation,
    brightness_kurtosis,
    brightness_skewness,
    brightness_sum_x,
    brightness_sum_x2,
    brightness_sum_x3,
    brightness_sum_x4,
    saturation_mean,
    saturation_standard_deviation,
    saturation_kurtosis,
    saturation_skewness,
    saturation_sum_x,
    saturation_sum_x2,
    saturation_sum_x3,
    saturation_sum_x4;

  Image
    *image;

  assert(images != (Image **) NULL);
  assert(*images != (Image *) NULL);
  assert((*images)->signature == MagickCoreSignature);
  (void) argc;
  (void) argv;
  image=(*images);
  for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
  {
    CacheView
      *image_view;

    MagickBooleanType
      status;

    ssize_t
      y;

    brightness_sum_x=0.0;
    brightness_sum_x2=0.0;
    brightness_sum_x3=0.0;
    brightness_sum_x4=0.0;
    brightness_mean=0.0;
    brightness_standard_deviation=0.0;
    brightness_kurtosis=0.0;
    brightness_skewness=0.0;
    saturation_sum_x=0.0;
    saturation_sum_x2=0.0;
    saturation_sum_x3=0.0;
    saturation_sum_x4=0.0;
    saturation_mean=0.0;
    saturation_standard_deviation=0.0;
    saturation_kurtosis=0.0;
    saturation_skewness=0.0;
    area=0.0;
    status=MagickTrue;
    image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1) \
    reduction(+:area,brightness_sum_x,brightness_sum_x2,brightness_sum_x3, \
      brightness_sum_x4,saturation_sum_x,saturation_sum_x2,saturation_sum_x3, \
      saturation_sum_x4)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      double
        brightness,
        hue,
        saturation;

      register const PixelPacket
        *p;

      register ssize_t
        x;

      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        ConvertRGBToHSB(GetPixelRed(p),GetPixelGreen(p),GetPixelBlue(p),
          &hue,&saturation,&brightness);
        brightness*=QuantumRange;
        brightness_sum_x+=brightness;
        brightness_sum_x2+=brightness*brightness;
        brightness_sum_x3+=brightness*brightness*brightness;
        brightness_sum_x4+=brightness*brightness*brightness*brightness;
        saturation*=QuantumRange;
        saturation_sum_x+=saturation;
        saturation_sum_x2+=saturation*saturation;
        saturation_sum_x3+=saturation*saturation*saturation;
        saturation_sum_x4+=saturation*saturation*saturation*saturation;
        area++;
        p++;
      }
    }
    image_view=DestroyCacheView(image_view);
    if (area <= 0.0)
      break;  /* NOTE(review): also skips all remaining frames -- confirm */
    brightness_mean=brightness_sum_x/area;
    (void) FormatLocaleString(text,MaxTextExtent,"%g",brightness_mean);
    (void) SetImageProperty(image,"filter:brightness:mean",text);
    brightness_standard_deviation=sqrt(brightness_sum_x2/area-(brightness_sum_x/
      area*brightness_sum_x/area));
    (void) FormatLocaleString(text,MaxTextExtent,"%g",
      brightness_standard_deviation);
    (void) SetImageProperty(image,"filter:brightness:standard-deviation",text);
    if (brightness_standard_deviation != 0)
      brightness_kurtosis=(brightness_sum_x4/area-4.0*brightness_mean*
        brightness_sum_x3/area+6.0*brightness_mean*brightness_mean*
        brightness_sum_x2/area-3.0*brightness_mean*brightness_mean*
        brightness_mean*brightness_mean)/(brightness_standard_deviation*
        brightness_standard_deviation*brightness_standard_deviation*
        brightness_standard_deviation)-3.0;
    (void) FormatLocaleString(text,MaxTextExtent,"%g",brightness_kurtosis);
    (void) SetImageProperty(image,"filter:brightness:kurtosis",text);
    if (brightness_standard_deviation != 0)
      brightness_skewness=(brightness_sum_x3/area-3.0*brightness_mean*
        brightness_sum_x2/area+2.0*brightness_mean*brightness_mean*
        brightness_mean)/(brightness_standard_deviation*
        brightness_standard_deviation*brightness_standard_deviation);
    (void) FormatLocaleString(text,MaxTextExtent,"%g",brightness_skewness);
    (void) SetImageProperty(image,"filter:brightness:skewness",text);
    saturation_mean=saturation_sum_x/area;
    (void) FormatLocaleString(text,MaxTextExtent,"%g",saturation_mean);
    (void) SetImageProperty(image,"filter:saturation:mean",text);
    saturation_standard_deviation=sqrt(saturation_sum_x2/area-(saturation_sum_x/
      area*saturation_sum_x/area));
    (void) FormatLocaleString(text,MaxTextExtent,"%g",
      saturation_standard_deviation);
    (void) SetImageProperty(image,"filter:saturation:standard-deviation",text);
    if (saturation_standard_deviation != 0)
      saturation_kurtosis=(saturation_sum_x4/area-4.0*saturation_mean*
        saturation_sum_x3/area+6.0*saturation_mean*saturation_mean*
        saturation_sum_x2/area-3.0*saturation_mean*saturation_mean*
        saturation_mean*saturation_mean)/(saturation_standard_deviation*
        saturation_standard_deviation*saturation_standard_deviation*
        saturation_standard_deviation)-3.0;
    (void) FormatLocaleString(text,MaxTextExtent,"%g",saturation_kurtosis);
    (void) SetImageProperty(image,"filter:saturation:kurtosis",text);
    if (saturation_standard_deviation != 0)
      saturation_skewness=(saturation_sum_x3/area-3.0*saturation_mean*
        saturation_sum_x2/area+2.0*saturation_mean*saturation_mean*
        saturation_mean)/(saturation_standard_deviation*
        saturation_standard_deviation*saturation_standard_deviation);
    (void) FormatLocaleString(text,MaxTextExtent,"%g",saturation_skewness);
    (void) SetImageProperty(image,"filter:saturation:skewness",text);
  }
  return(MagickImageFilterSignature);
}
|
2D_example.c | /* To compile this program on Linux, try:
make CFLAGS='-std=c99 -Wall' example_2D
To run:
./example_2D; echo $?
It should print 0 if OK.
You can even compile it to run on multicore SMP for free with
make CFLAGS='-std=c99 -fopenmp -Wall' example_2D
To verify there are really some clone() system calls that create the threads:
strace -f ./example_2D ; echo $?
You can notice that the #pragma smecy are ignored (the project is
on-going :-) ) but that the program produces already correct results in
sequential execution and parallel OpenMP execution.
Enjoy!
Ronan.Keryell@hpc-project.com
for ARTEMIS SMECY European project.
*/
#include <stdlib.h>
#include "example_helper.h"
// Problem size
enum { WIDTH = 500, HEIGHT = 200 };
/* The main host program controlling and representing the whole
application */
/* The main host program controlling and representing the whole
   application: initializes an image, rewrites three sub-squares
   concurrently (one OpenMP section each, mapped onto processing elements
   via the #pragma smecy annotations), then normalizes the result and
   writes it out as a PGM file. */
int main(int argc, char* argv[]) {
  int image[HEIGHT][WIDTH];
  unsigned char output[HEIGHT][WIDTH];

  // Initialize with some values
  init_image(WIDTH, HEIGHT, image);

#pragma omp parallel sections
  {
    // On one processor
    // We rewrite a small part of image:
#pragma smecy map(PE, 0) arg(3, inout, [HEIGHT][WIDTH] \
                             /[HEIGHT/3:HEIGHT/3 + HEIGHT/2 - 1] \
                             [WIDTH/8:WIDTH/8 + HEIGHT/2 - 1])
    square_symmetry(WIDTH, HEIGHT, image, HEIGHT/2, WIDTH/8, HEIGHT/3);
    // On another processor
#pragma omp section
    // Here let the compiler to guess the array size
#pragma smecy map(PE, 1) arg(3, inout, /[HEIGHT/4:HEIGHT/4 + HEIGHT/2 - 1] \
                             [3*WIDTH/8:3*WIDTH/8 + HEIGHT/2 - 1])
    square_symmetry(WIDTH, HEIGHT, image, HEIGHT/2, 3*WIDTH/4, HEIGHT/4);
    // On another processor
#pragma omp section
    // Here let the compiler to guess the array size
#pragma smecy map(PE, 1) arg(3, inout, /[2*HEIGHT/5:2*HEIGHT/5 + HEIGHT/2 - 1] \
                             [WIDTH/2:WIDTH/2 + HEIGHT/2 - 1])
    square_symmetry(WIDTH, HEIGHT, image, HEIGHT/2, WIDTH/2, 2*HEIGHT/5);
  }
  // Here there is a synchronization because of the parallel part end
  // Since there
  normalize_to_char(WIDTH, HEIGHT, image, output);
  write_pgm_image("2D_example-output.pgm", WIDTH, HEIGHT, output);
  return EXIT_SUCCESS;
}
|
17_primes-par3.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
/*
 * Counts the primes in [2, n) by trial division and prints the count.
 * The divisor loop now stops at sqrt(i): if i has a factor above its square
 * root it also has one below it, so the result is unchanged while the work
 * per candidate drops from O(i) to O(sqrt(i)).  The `aux` helper variable
 * (and its firstprivate clause) is replaced by a loop-local divisor.
 */
int main(int argc, char **argv) {
  // quantos numeros primos entre 1 e N ?
  unsigned long n = 99999;
  unsigned long primes = 0;
  #pragma omp parallel for reduction(+:primes) schedule(guided)
  for (unsigned long i = 2; i < n; i++) {
    int is_prime = 1;
    for (unsigned long d = 2; d * d <= i; d++) {
      if (i % d == 0) {
        is_prime = 0;
        break;
      }
    }
    if (is_prime) primes++;
  }
  printf("%lu primos entre 1 e %lu\n",primes,n);
  return 0;
}
|
GB_unop__identity_fp32_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fp32_uint64
// op(A') function: GB_unop_tran__identity_fp32_uint64
// C type: float
// A type: uint64_t
// cast: float cij = (float) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
float z = (float) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = (float) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Apply cij = (float) aij over all anz entries; generated code, the
 * operator/types are fixed by the GB_* macros above. */
GrB_Info GB_unop_apply__identity_fp32_uint64
(
    float *Cx,                      // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // no bitmap: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint64_t aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint64_t aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = (float) A': the shared transpose template expands here using the
 * GB_* macros defined above for this type pair. */
GrB_Info GB_unop_tran__identity_fp32_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolution_7x7.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
static void conv7x7s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q=0; q<inch; q++)
{
float* outptr = out;
float* outptr2 = out + outw;
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p*inch*49 + q*49;
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
const float* r4 = img0 + w*4;
const float* r5 = img0 + w*5;
const float* r6 = img0 + w*6;
const float* r7 = img0 + w*7;
const float* k0 = kernel0;
const float* k1 = kernel0 + 7;
const float* k2 = kernel0 + 14;
const float* k3 = kernel0 + 21;
const float* k4 = kernel0 + 28;
const float* k5 = kernel0 + 35;
const float* k6 = kernel0 + 42;
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _sum = vld1q_f32(outptr);
float32x4_t _k0123 = vld1q_f32(k0);
float32x4_t _k4567 = vld1q_f32(k0 + 4);
float32x4_t _r00 = vld1q_f32(r0);// 0 1 2 3
float32x4_t _r04 = vld1q_f32(r0 + 4);// 4 5 6 7
float32x4_t _r00n = vld1q_f32(r0 + 8);// 8 9 10 11
float32x4_t _r01 = vextq_f32(_r00, _r04, 1);// 1 2 3 4
float32x4_t _r02 = vextq_f32(_r00, _r04, 2);// 2 3 4 5
float32x4_t _r03 = vextq_f32(_r00, _r04, 3);// 3 4 5 6
float32x4_t _r05 = vextq_f32(_r04, _r00n, 1);// 5 6 7 8
float32x4_t _r06 = vextq_f32(_r04, _r00n, 2);// 6 7 8 9
_sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0);
_sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1);
_sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2);
_sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3);
_sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0);
_sum = vfmaq_laneq_f32(_sum, _r05, _k4567, 1);
_sum = vfmaq_laneq_f32(_sum, _r06, _k4567, 2);
float32x4_t _k78910 = vld1q_f32(k1);
float32x4_t _k11121314 = vld1q_f32(k1 + 4);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r14 = vld1q_f32(r1 + 4);
float32x4_t _r10n = vld1q_f32(r1 + 8);
float32x4_t _r11 = vextq_f32(_r10, _r14, 1);
float32x4_t _r12 = vextq_f32(_r10, _r14, 2);
float32x4_t _r13 = vextq_f32(_r10, _r14, 3);
float32x4_t _r15 = vextq_f32(_r14, _r10n, 1);
float32x4_t _r16 = vextq_f32(_r14, _r10n, 2);
_sum = vfmaq_laneq_f32(_sum, _r10, _k78910, 0);
_sum = vfmaq_laneq_f32(_sum, _r11, _k78910, 1);
_sum = vfmaq_laneq_f32(_sum, _r12, _k78910, 2);
_sum = vfmaq_laneq_f32(_sum, _r13, _k78910, 3);
_sum = vfmaq_laneq_f32(_sum, _r14, _k11121314, 0);
_sum = vfmaq_laneq_f32(_sum, _r15, _k11121314, 1);
_sum = vfmaq_laneq_f32(_sum, _r16, _k11121314, 2);
float32x4_t _k14151617 = vld1q_f32(k2);
float32x4_t _k18192021 = vld1q_f32(k2 + 4);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r24 = vld1q_f32(r2 + 4);
float32x4_t _r20n = vld1q_f32(r2 + 8);
float32x4_t _r21 = vextq_f32(_r20, _r24, 1);
float32x4_t _r22 = vextq_f32(_r20, _r24, 2);
float32x4_t _r23 = vextq_f32(_r20, _r24, 3);
float32x4_t _r25 = vextq_f32(_r24, _r20n, 1);
float32x4_t _r26 = vextq_f32(_r24, _r20n, 2);
_sum = vfmaq_laneq_f32(_sum, _r20, _k14151617, 0);
_sum = vfmaq_laneq_f32(_sum, _r21, _k14151617, 1);
_sum = vfmaq_laneq_f32(_sum, _r22, _k14151617, 2);
_sum = vfmaq_laneq_f32(_sum, _r23, _k14151617, 3);
_sum = vfmaq_laneq_f32(_sum, _r24, _k18192021, 0);
_sum = vfmaq_laneq_f32(_sum, _r25, _k18192021, 1);
_sum = vfmaq_laneq_f32(_sum, _r26, _k18192021, 2);
float32x4_t _k21222324 = vld1q_f32(k3);
float32x4_t _k25262728 = vld1q_f32(k3 + 4);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r34 = vld1q_f32(r3 + 4);
float32x4_t _r30n = vld1q_f32(r3 + 8);
float32x4_t _r31 = vextq_f32(_r30, _r34, 1);
float32x4_t _r32 = vextq_f32(_r30, _r34, 2);
float32x4_t _r33 = vextq_f32(_r30, _r34, 3);
float32x4_t _r35 = vextq_f32(_r34, _r30n, 1);
float32x4_t _r36 = vextq_f32(_r34, _r30n, 2);
_sum = vfmaq_laneq_f32(_sum, _r30, _k21222324, 0);
_sum = vfmaq_laneq_f32(_sum, _r31, _k21222324, 1);
_sum = vfmaq_laneq_f32(_sum, _r32, _k21222324, 2);
_sum = vfmaq_laneq_f32(_sum, _r33, _k21222324, 3);
_sum = vfmaq_laneq_f32(_sum, _r34, _k25262728, 0);
_sum = vfmaq_laneq_f32(_sum, _r35, _k25262728, 1);
_sum = vfmaq_laneq_f32(_sum, _r36, _k25262728, 2);
float32x4_t _k28293031 = vld1q_f32(k4);
float32x4_t _k32333435 = vld1q_f32(k4 + 4);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r44 = vld1q_f32(r4 + 4);
float32x4_t _r40n = vld1q_f32(r4 + 8);
float32x4_t _r41 = vextq_f32(_r40, _r44, 1);
float32x4_t _r42 = vextq_f32(_r40, _r44, 2);
float32x4_t _r43 = vextq_f32(_r40, _r44, 3);
float32x4_t _r45 = vextq_f32(_r44, _r40n, 1);
float32x4_t _r46 = vextq_f32(_r44, _r40n, 2);
_sum = vfmaq_laneq_f32(_sum, _r40, _k28293031, 0);
_sum = vfmaq_laneq_f32(_sum, _r41, _k28293031, 1);
_sum = vfmaq_laneq_f32(_sum, _r42, _k28293031, 2);
_sum = vfmaq_laneq_f32(_sum, _r43, _k28293031, 3);
_sum = vfmaq_laneq_f32(_sum, _r44, _k32333435, 0);
_sum = vfmaq_laneq_f32(_sum, _r45, _k32333435, 1);
_sum = vfmaq_laneq_f32(_sum, _r46, _k32333435, 2);
float32x4_t _k35363738 = vld1q_f32(k5);
float32x4_t _k39404142 = vld1q_f32(k5 + 4);
float32x4_t _r50 = vld1q_f32(r5);
float32x4_t _r54 = vld1q_f32(r5 + 4);
float32x4_t _r50n = vld1q_f32(r5 + 8);
float32x4_t _r51 = vextq_f32(_r50, _r54, 1);
float32x4_t _r52 = vextq_f32(_r50, _r54, 2);
float32x4_t _r53 = vextq_f32(_r50, _r54, 3);
float32x4_t _r55 = vextq_f32(_r54, _r50n, 1);
float32x4_t _r56 = vextq_f32(_r54, _r50n, 2);
_sum = vfmaq_laneq_f32(_sum, _r50, _k35363738, 0);
_sum = vfmaq_laneq_f32(_sum, _r51, _k35363738, 1);
_sum = vfmaq_laneq_f32(_sum, _r52, _k35363738, 2);
_sum = vfmaq_laneq_f32(_sum, _r53, _k35363738, 3);
_sum = vfmaq_laneq_f32(_sum, _r54, _k39404142, 0);
_sum = vfmaq_laneq_f32(_sum, _r55, _k39404142, 1);
_sum = vfmaq_laneq_f32(_sum, _r56, _k39404142, 2);
float32x4_t _k42434445 = vld1q_f32(k6);
float32x4_t _k46474849 = vld1q_f32(k6 + 4);
float32x4_t _r60 = vld1q_f32(r6);
float32x4_t _r64 = vld1q_f32(r6 + 4);
float32x4_t _r60n = vld1q_f32(r6 + 8);
float32x4_t _r61 = vextq_f32(_r60, _r64, 1);
float32x4_t _r62 = vextq_f32(_r60, _r64, 2);
float32x4_t _r63 = vextq_f32(_r60, _r64, 3);
float32x4_t _r65 = vextq_f32(_r64, _r60n, 1);
float32x4_t _r66 = vextq_f32(_r64, _r60n, 2);
_sum = vfmaq_laneq_f32(_sum, _r60, _k42434445, 0);
_sum = vfmaq_laneq_f32(_sum, _r61, _k42434445, 1);
_sum = vfmaq_laneq_f32(_sum, _r62, _k42434445, 2);
_sum = vfmaq_laneq_f32(_sum, _r63, _k42434445, 3);
_sum = vfmaq_laneq_f32(_sum, _r64, _k46474849, 0);
_sum = vfmaq_laneq_f32(_sum, _r65, _k46474849, 1);
_sum = vfmaq_laneq_f32(_sum, _r66, _k46474849, 2);
vst1q_f32(outptr, _sum);
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
r6 += 4;
outptr += 4;
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%1, #256] \n"
"vld1.f32 {d24-d25}, [%1] \n"// _sum
"veor q13, q13 \n"// _sum2 = 0;
"veor q14, q14 \n"// _sum3 = 0;
"veor q15, q15 \n"// _sum4 = 0;
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k0123 k4567
"add %9, #28 \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2]! \n"// q0 = 0 1 2 3
"vmla.f32 q12, q0, d8[0] \n"
"pld [%2, #256] \n"
"vld1.f32 {d4-d7}, [%2] \n"// q2 = 4 5 6 7 q3 = 8 9 10 11
"vmla.f32 q13, q2, d10[0] \n"
"vext.32 q1, q0, q2, #1 \n"// q1 = 1 2 3 4
"vext.32 q10, q2, q3, #1 \n"// q10= 5 6 7 8
"vmla.f32 q14, q1, d8[1] \n"
"vmla.f32 q15, q10, d10[1] \n"
"vext.32 q8, q0, q2, #2 \n"// q8 = 2 3 4 5
"vext.32 q11, q2, q3, #2 \n"// q11= 6 7 8 9
"vmla.f32 q12, q8, d9[0] \n"
"vmla.f32 q13, q11, d11[0] \n"
"vext.32 q9, q0, q2, #3 \n"// q9 = 3 4 5 6
"vmla.f32 q14, q9, d9[1] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k78910 k11121314
"add %9, #28 \n"
"pld [%3, #128] \n"
"vld1.f32 {d0-d1}, [%3]! \n"
"vmla.f32 q15, q0, d12[0] \n"
"pld [%3, #256] \n"
"vld1.f32 {d4-d7}, [%3] \n"
"vmla.f32 q12, q2, d14[0] \n"
"vext.32 q1, q0, q2, #1 \n"
"vext.32 q10, q2, q3, #1 \n"
"vmla.f32 q13, q1, d12[1] \n"
"vmla.f32 q14, q10, d14[1] \n"
"vext.32 q8, q0, q2, #2 \n"
"vext.32 q11, q2, q3, #2 \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q11, d15[0] \n"
"vext.32 q9, q0, q2, #3 \n"
"vmla.f32 q13, q9, d13[1] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k14151617 k18192021
"add %9, #28 \n"
"pld [%4, #128] \n"
"vld1.f32 {d0-d1}, [%4]! \n"
"vmla.f32 q14, q0, d8[0] \n"
"pld [%4, #256] \n"
"vld1.f32 {d4-d7}, [%4] \n"
"vmla.f32 q15, q2, d10[0] \n"
"vext.32 q1, q0, q2, #1 \n"
"vext.32 q10, q2, q3, #1 \n"
"vmla.f32 q12, q1, d8[1] \n"
"vmla.f32 q13, q10, d10[1] \n"
"vext.32 q8, q0, q2, #2 \n"
"vext.32 q11, q2, q3, #2 \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q11, d11[0] \n"
"vext.32 q9, q0, q2, #3 \n"
"vmla.f32 q12, q9, d9[1] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k21222324 k25262728
"add %9, #28 \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5]! \n"
"vmla.f32 q13, q0, d12[0] \n"
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5] \n"
"vmla.f32 q14, q2, d14[0] \n"
"vext.32 q1, q0, q2, #1 \n"
"vext.32 q10, q2, q3, #1 \n"
"vmla.f32 q15, q1, d12[1] \n"
"vmla.f32 q12, q10, d14[1] \n"
"vext.32 q8, q0, q2, #2 \n"
"vext.32 q11, q2, q3, #2 \n"
"vmla.f32 q13, q8, d13[0] \n"
"vmla.f32 q14, q11, d15[0] \n"
"vext.32 q9, q0, q2, #3 \n"
"vmla.f32 q15, q9, d13[1] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k28293031 k32333435
"add %9, #28 \n"
"pld [%6, #128] \n"
"vld1.f32 {d0-d1}, [%6]! \n"
"vmla.f32 q12, q0, d8[0] \n"
"pld [%6, #256] \n"
"vld1.f32 {d4-d7}, [%6] \n"
"vmla.f32 q13, q2, d10[0] \n"
"vext.32 q1, q0, q2, #1 \n"
"vext.32 q10, q2, q3, #1 \n"
"vmla.f32 q14, q1, d8[1] \n"
"vmla.f32 q15, q10, d10[1] \n"
"vext.32 q8, q0, q2, #2 \n"
"vext.32 q11, q2, q3, #2 \n"
"vmla.f32 q12, q8, d9[0] \n"
"vmla.f32 q13, q11, d11[0] \n"
"vext.32 q9, q0, q2, #3 \n"
"vmla.f32 q14, q9, d9[1] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k35363738 k39404142
"add %9, #28 \n"
"pld [%7, #128] \n"
"vld1.f32 {d0-d1}, [%7]! \n"
"vmla.f32 q15, q0, d12[0] \n"
"pld [%7, #256] \n"
"vld1.f32 {d4-d7}, [%7] \n"
"vmla.f32 q12, q2, d14[0] \n"
"vext.32 q1, q0, q2, #1 \n"
"vext.32 q10, q2, q3, #1 \n"
"vmla.f32 q13, q1, d12[1] \n"
"vmla.f32 q14, q10, d14[1] \n"
"vext.32 q8, q0, q2, #2 \n"
"vext.32 q11, q2, q3, #2 \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q11, d15[0] \n"
"vext.32 q9, q0, q2, #3 \n"
"vmla.f32 q13, q9, d13[1] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k42434445 k46474849
"sub %9, #168 \n"// restore k0
"pld [%8, #128] \n"
"vld1.f32 {d0-d1}, [%8]! \n"
"vmla.f32 q14, q0, d8[0] \n"
"pld [%8, #256] \n"
"vld1.f32 {d4-d7}, [%8] \n"
"vmla.f32 q15, q2, d10[0] \n"
"vext.32 q1, q0, q2, #1 \n"
"vext.32 q10, q2, q3, #1 \n"
"vmla.f32 q12, q1, d8[1] \n"
"vmla.f32 q13, q10, d10[1] \n"
"vext.32 q8, q0, q2, #2 \n"
"vext.32 q11, q2, q3, #2 \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q11, d11[0] \n"
"vext.32 q9, q0, q2, #3 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vadd.f32 q13, q13, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"vadd.f32 q12, q12, q13 \n"
"vst1.f32 {d24-d25}, [%1]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5), // %7
"=r"(r6), // %8
"=r"(k0) // %9
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"8"(r6),
"9"(k0)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r0[5] * k0[5];
sum += r0[6] * k0[6];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r1[5] * k1[5];
sum += r1[6] * k1[6];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r2[5] * k2[5];
sum += r2[6] * k2[6];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r3[5] * k3[5];
sum += r3[6] * k3[6];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
sum += r4[5] * k4[5];
sum += r4[6] * k4[6];
sum += r5[0] * k5[0];
sum += r5[1] * k5[1];
sum += r5[2] * k5[2];
sum += r5[3] * k5[3];
sum += r5[4] * k5[4];
sum += r5[5] * k5[5];
sum += r5[6] * k5[6];
sum += r6[0] * k6[0];
sum += r6[1] * k6[1];
sum += r6[2] * k6[2];
sum += r6[3] * k6[3];
sum += r6[4] * k6[4];
sum += r6[5] * k6[5];
sum += r6[6] * k6[6];
*outptr += sum;
r0++;
r1++;
r2++;
r3++;
r4++;
r5++;
r6++;
outptr++;
}
r0 += 6;
r1 += 6;
r2 += 6;
r3 += 6;
r4 += 6;
r5 += 6;
r6 += 6;
}
}
}
}
// conv7x7s2_neon - 7x7 convolution, stride 2, accumulating into top_blob.
//
// For each output channel p: the output plane is initialized with its bias,
// then every input channel q contributes a 7x7 correlation with the
// corresponding 49-element kernel slice (kernel + p*inch*49 + q*49).
//
// Three code paths per output row:
//   - aarch64: NEON intrinsics, 4 output pixels per iteration. Stride-2
//     access is handled with de-interleaving vld2q_f32 loads (even/odd
//     lanes) plus vextq_f32 to form the shifted taps.
//   - armv7:   hand-scheduled inline assembly doing the same 4-wide tiling.
//   - scalar:  tail loop for the remaining outw % 4 pixels.
//
// NOTE(review): removed the unused local `outptr2` (leftover from the
// two-row stride-1 variant); this function emits one output row per pass.
static void conv7x7s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// After one output row the input pointers have advanced 2*outw columns;
// tailstep (= 2*w - 2*outw) jumps them to the start of the next input
// row pair (vertical stride 2).
const int tailstep = w - 2*outw + w;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q=0; q<inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
// 49 = 7x7 weights per (outch, inch) pair.
const float* kernel0 = kernel + p*inch*49 + q*49;
// Seven consecutive input rows feeding one output row.
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
const float* r4 = img0 + w*4;
const float* r5 = img0 + w*5;
const float* r6 = img0 + w*6;
// Seven kernel rows, 7 floats each.
const float* k0 = kernel0;
const float* k1 = kernel0 + 7;
const float* k2 = kernel0 + 14;
const float* k3 = kernel0 + 21;
const float* k4 = kernel0 + 28;
const float* k5 = kernel0 + 35;
const float* k6 = kernel0 + 42;
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;             // vectorized groups of 4 outputs
int remain = outw - (nn << 2);  // scalar tail
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
// aarch64 intrinsics path: 4 outputs per iteration. vld2q_f32 splits
// each row into even-index and odd-index lanes so that the stride-2
// taps r[0..6] can be formed with vextq_f32 shifts.
for (; nn>0; nn--)
{
float32x4_t _sum = vld1q_f32(outptr);
float32x4_t _k0123 = vld1q_f32(k0);
float32x4_t _k4567 = vld1q_f32(k0 + 4);
float32x4x2_t _r00_02461357 = vld2q_f32(r0);
float32x4x2_t _r00nx2 = vld2q_f32(r0 + 8);
float32x4_t _r0_8101214 = _r00nx2.val[0];// 8 10 12 14
float32x4_t _r0_9111315 = _r00nx2.val[1];// 9 11 13 15
float32x4_t _r00 = _r00_02461357.val[0];// 0 2 4 6
float32x4_t _r01 = _r00_02461357.val[1];// 1 3 5 7
float32x4_t _r02 = vextq_f32(_r00, _r0_8101214, 1);// 2 4 6 8
float32x4_t _r03 = vextq_f32(_r01, _r0_9111315, 1);// 3 5 7 9
float32x4_t _r04 = vextq_f32(_r00, _r0_8101214, 2);// 4 6 8 10
float32x4_t _r05 = vextq_f32(_r01, _r0_9111315, 2);// 5 7 9 11
float32x4_t _r06 = vextq_f32(_r00, _r0_8101214, 3);// 6 8 10 12
_sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0);
_sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1);
_sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2);
_sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3);
_sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0);
_sum = vfmaq_laneq_f32(_sum, _r05, _k4567, 1);
_sum = vfmaq_laneq_f32(_sum, _r06, _k4567, 2);
// kernel row 1
float32x4_t _k78910 = vld1q_f32(k1);
float32x4_t _k11121314 = vld1q_f32(k1 + 4);
float32x4x2_t _r10_02461357 = vld2q_f32(r1);
float32x4x2_t _r10nx2 = vld2q_f32(r1 + 8);
float32x4_t _r1_8101214 = _r10nx2.val[0];
float32x4_t _r1_9111315 = _r10nx2.val[1];
float32x4_t _r10 = _r10_02461357.val[0];
float32x4_t _r11 = _r10_02461357.val[1];
float32x4_t _r12 = vextq_f32(_r10, _r1_8101214, 1);
float32x4_t _r13 = vextq_f32(_r11, _r1_9111315, 1);
float32x4_t _r14 = vextq_f32(_r10, _r1_8101214, 2);
float32x4_t _r15 = vextq_f32(_r11, _r1_9111315, 2);
float32x4_t _r16 = vextq_f32(_r10, _r1_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r10, _k78910, 0);
_sum = vfmaq_laneq_f32(_sum, _r11, _k78910, 1);
_sum = vfmaq_laneq_f32(_sum, _r12, _k78910, 2);
_sum = vfmaq_laneq_f32(_sum, _r13, _k78910, 3);
_sum = vfmaq_laneq_f32(_sum, _r14, _k11121314, 0);
_sum = vfmaq_laneq_f32(_sum, _r15, _k11121314, 1);
_sum = vfmaq_laneq_f32(_sum, _r16, _k11121314, 2);
// kernel row 2
float32x4_t _k14151617 = vld1q_f32(k2);
float32x4_t _k18192021 = vld1q_f32(k2 + 4);
float32x4x2_t _r20_02461357 = vld2q_f32(r2);
float32x4x2_t _r20nx2 = vld2q_f32(r2 + 8);
float32x4_t _r2_8101214 = _r20nx2.val[0];
float32x4_t _r2_9111315 = _r20nx2.val[1];
float32x4_t _r20 = _r20_02461357.val[0];
float32x4_t _r21 = _r20_02461357.val[1];
float32x4_t _r22 = vextq_f32(_r20, _r2_8101214, 1);
float32x4_t _r23 = vextq_f32(_r21, _r2_9111315, 1);
float32x4_t _r24 = vextq_f32(_r20, _r2_8101214, 2);
float32x4_t _r25 = vextq_f32(_r21, _r2_9111315, 2);
float32x4_t _r26 = vextq_f32(_r20, _r2_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r20, _k14151617, 0);
_sum = vfmaq_laneq_f32(_sum, _r21, _k14151617, 1);
_sum = vfmaq_laneq_f32(_sum, _r22, _k14151617, 2);
_sum = vfmaq_laneq_f32(_sum, _r23, _k14151617, 3);
_sum = vfmaq_laneq_f32(_sum, _r24, _k18192021, 0);
_sum = vfmaq_laneq_f32(_sum, _r25, _k18192021, 1);
_sum = vfmaq_laneq_f32(_sum, _r26, _k18192021, 2);
// kernel row 3
float32x4_t _k21222324 = vld1q_f32(k3);
float32x4_t _k25262728 = vld1q_f32(k3 + 4);
float32x4x2_t _r30_02461357 = vld2q_f32(r3);
float32x4x2_t _r30nx2 = vld2q_f32(r3 + 8);
float32x4_t _r3_8101214 = _r30nx2.val[0];
float32x4_t _r3_9111315 = _r30nx2.val[1];
float32x4_t _r30 = _r30_02461357.val[0];
float32x4_t _r31 = _r30_02461357.val[1];
float32x4_t _r32 = vextq_f32(_r30, _r3_8101214, 1);
float32x4_t _r33 = vextq_f32(_r31, _r3_9111315, 1);
float32x4_t _r34 = vextq_f32(_r30, _r3_8101214, 2);
float32x4_t _r35 = vextq_f32(_r31, _r3_9111315, 2);
float32x4_t _r36 = vextq_f32(_r30, _r3_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r30, _k21222324, 0);
_sum = vfmaq_laneq_f32(_sum, _r31, _k21222324, 1);
_sum = vfmaq_laneq_f32(_sum, _r32, _k21222324, 2);
_sum = vfmaq_laneq_f32(_sum, _r33, _k21222324, 3);
_sum = vfmaq_laneq_f32(_sum, _r34, _k25262728, 0);
_sum = vfmaq_laneq_f32(_sum, _r35, _k25262728, 1);
_sum = vfmaq_laneq_f32(_sum, _r36, _k25262728, 2);
// kernel row 4
float32x4_t _k28293031 = vld1q_f32(k4);
float32x4_t _k32333435 = vld1q_f32(k4 + 4);
float32x4x2_t _r40_02461357 = vld2q_f32(r4);
float32x4x2_t _r40nx2 = vld2q_f32(r4 + 8);
float32x4_t _r4_8101214 = _r40nx2.val[0];
float32x4_t _r4_9111315 = _r40nx2.val[1];
float32x4_t _r40 = _r40_02461357.val[0];
float32x4_t _r41 = _r40_02461357.val[1];
float32x4_t _r42 = vextq_f32(_r40, _r4_8101214, 1);
float32x4_t _r43 = vextq_f32(_r41, _r4_9111315, 1);
float32x4_t _r44 = vextq_f32(_r40, _r4_8101214, 2);
float32x4_t _r45 = vextq_f32(_r41, _r4_9111315, 2);
float32x4_t _r46 = vextq_f32(_r40, _r4_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r40, _k28293031, 0);
_sum = vfmaq_laneq_f32(_sum, _r41, _k28293031, 1);
_sum = vfmaq_laneq_f32(_sum, _r42, _k28293031, 2);
_sum = vfmaq_laneq_f32(_sum, _r43, _k28293031, 3);
_sum = vfmaq_laneq_f32(_sum, _r44, _k32333435, 0);
_sum = vfmaq_laneq_f32(_sum, _r45, _k32333435, 1);
_sum = vfmaq_laneq_f32(_sum, _r46, _k32333435, 2);
// kernel row 5
float32x4_t _k35363738 = vld1q_f32(k5);
float32x4_t _k39404142 = vld1q_f32(k5 + 4);
float32x4x2_t _r50_02461357 = vld2q_f32(r5);
float32x4x2_t _r50nx2 = vld2q_f32(r5 + 8);
float32x4_t _r5_8101214 = _r50nx2.val[0];
float32x4_t _r5_9111315 = _r50nx2.val[1];
float32x4_t _r50 = _r50_02461357.val[0];
float32x4_t _r51 = _r50_02461357.val[1];
float32x4_t _r52 = vextq_f32(_r50, _r5_8101214, 1);
float32x4_t _r53 = vextq_f32(_r51, _r5_9111315, 1);
float32x4_t _r54 = vextq_f32(_r50, _r5_8101214, 2);
float32x4_t _r55 = vextq_f32(_r51, _r5_9111315, 2);
float32x4_t _r56 = vextq_f32(_r50, _r5_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r50, _k35363738, 0);
_sum = vfmaq_laneq_f32(_sum, _r51, _k35363738, 1);
_sum = vfmaq_laneq_f32(_sum, _r52, _k35363738, 2);
_sum = vfmaq_laneq_f32(_sum, _r53, _k35363738, 3);
_sum = vfmaq_laneq_f32(_sum, _r54, _k39404142, 0);
_sum = vfmaq_laneq_f32(_sum, _r55, _k39404142, 1);
_sum = vfmaq_laneq_f32(_sum, _r56, _k39404142, 2);
// kernel row 6
float32x4_t _k42434445 = vld1q_f32(k6);
float32x4_t _k46474849 = vld1q_f32(k6 + 4);
float32x4x2_t _r60_02461357 = vld2q_f32(r6);
float32x4x2_t _r60nx2 = vld2q_f32(r6 + 8);
float32x4_t _r6_8101214 = _r60nx2.val[0];
float32x4_t _r6_9111315 = _r60nx2.val[1];
float32x4_t _r60 = _r60_02461357.val[0];
float32x4_t _r61 = _r60_02461357.val[1];
float32x4_t _r62 = vextq_f32(_r60, _r6_8101214, 1);
float32x4_t _r63 = vextq_f32(_r61, _r6_9111315, 1);
float32x4_t _r64 = vextq_f32(_r60, _r6_8101214, 2);
float32x4_t _r65 = vextq_f32(_r61, _r6_9111315, 2);
float32x4_t _r66 = vextq_f32(_r60, _r6_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r60, _k42434445, 0);
_sum = vfmaq_laneq_f32(_sum, _r61, _k42434445, 1);
_sum = vfmaq_laneq_f32(_sum, _r62, _k42434445, 2);
_sum = vfmaq_laneq_f32(_sum, _r63, _k42434445, 3);
_sum = vfmaq_laneq_f32(_sum, _r64, _k46474849, 0);
_sum = vfmaq_laneq_f32(_sum, _r65, _k46474849, 1);
_sum = vfmaq_laneq_f32(_sum, _r66, _k46474849, 2);
vst1q_f32(outptr, _sum);
// Stride 2: 4 outputs consume 8 input columns.
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
r4 += 8;
r5 += 8;
r6 += 8;
outptr += 4;
}
#else
// armv7 inline-assembly path; same 4-wide stride-2 tiling as above.
// q13..q15 hold partial sums that are reduced at the end; %9 (k0) is
// advanced by 28 bytes (7 floats) per kernel row and restored by -168
// after the last row so it points at kernel row 0 again.
if (nn > 0)
{
asm volatile(
"0:                             \n"
"pld        [%1, #256]          \n"
"vld1.f32   {d26-d27}, [%1]     \n"// _sum
"veor       q14, q14            \n"// _sum2 = 0;
"veor       q15, q15            \n"// _sum3 = 0;
"pld        [%9, #256]          \n"
"vld1.f32   {d8-d11}, [%9]      \n"// q4 q5 = k0123 k4567
"add        %9, #28             \n"
"pld        [%2, #512]          \n"
"vld2.f32   {d0-d3}, [%2]!      \n"// q0 = 0 2 4 6  q1 = 1 3 5 7
"vmla.f32   q13, q0, d8[0]      \n"
"vmla.f32   q14, q1, d8[1]      \n"
"vld2.f32   {d4-d7}, [%2]       \n"// q2 = 8 10 12 14  q3 = 9 11 13 15
"vext.32    q8, q0, q2, #1      \n"// q8 = 2 4 6 8
"vext.32    q9, q1, q3, #1      \n"// q9 = 3 5 7 9
"vmla.f32   q15, q8, d9[0]      \n"
"vmla.f32   q13, q9, d9[1]      \n"
"vext.32    q10, q0, q2, #2     \n"// q10= 4 6 8 10
"vext.32    q11, q1, q3, #2     \n"// q11= 5 7 9 11
"vmla.f32   q14, q10, d10[0]    \n"
"vmla.f32   q15, q11, d10[1]    \n"
"vext.32    q12, q0, q2, #3     \n"// q12= 6 8 10 12
"vmla.f32   q13, q12, d11[0]    \n"
"pld        [%9, #256]          \n"
"vld1.f32   {d12-d15}, [%9]     \n"// q6 q7 = k78910 k11121314
"add        %9, #28             \n"
"pld        [%3, #512]          \n"
"vld2.f32   {d0-d3}, [%3]!      \n"
"vmla.f32   q14, q0, d12[0]     \n"
"vmla.f32   q15, q1, d12[1]     \n"
"vld2.f32   {d4-d7}, [%3]       \n"
"vext.32    q8, q0, q2, #1      \n"
"vext.32    q9, q1, q3, #1      \n"
"vmla.f32   q13, q8, d13[0]     \n"
"vmla.f32   q14, q9, d13[1]     \n"
"vext.32    q10, q0, q2, #2     \n"
"vext.32    q11, q1, q3, #2     \n"
"vmla.f32   q15, q10, d14[0]    \n"
"vmla.f32   q13, q11, d14[1]    \n"
"vext.32    q12, q0, q2, #3     \n"
"vmla.f32   q14, q12, d15[0]    \n"
"pld        [%9, #256]          \n"
"vld1.f32   {d8-d11}, [%9]      \n"// q4 q5 = k14151617 k18192021
"add        %9, #28             \n"
"pld        [%4, #512]          \n"
"vld2.f32   {d0-d3}, [%4]!      \n"
"vmla.f32   q15, q0, d8[0]      \n"
"vmla.f32   q13, q1, d8[1]      \n"
"vld2.f32   {d4-d7}, [%4]       \n"
"vext.32    q8, q0, q2, #1      \n"
"vext.32    q9, q1, q3, #1      \n"
"vmla.f32   q14, q8, d9[0]      \n"
"vmla.f32   q15, q9, d9[1]      \n"
"vext.32    q10, q0, q2, #2     \n"
"vext.32    q11, q1, q3, #2     \n"
"vmla.f32   q13, q10, d10[0]    \n"
"vmla.f32   q14, q11, d10[1]    \n"
"vext.32    q12, q0, q2, #3     \n"
"vmla.f32   q15, q12, d11[0]    \n"
"pld        [%9, #256]          \n"
"vld1.f32   {d12-d15}, [%9]     \n"// q6 q7 = k21222324 k25262728
"add        %9, #28             \n"
"pld        [%5, #512]          \n"
"vld2.f32   {d0-d3}, [%5]!      \n"
"vmla.f32   q13, q0, d12[0]     \n"
"vmla.f32   q14, q1, d12[1]     \n"
"vld2.f32   {d4-d7}, [%5]       \n"
"vext.32    q8, q0, q2, #1      \n"
"vext.32    q9, q1, q3, #1      \n"
"vmla.f32   q15, q8, d13[0]     \n"
"vmla.f32   q13, q9, d13[1]     \n"
"vext.32    q10, q0, q2, #2     \n"
"vext.32    q11, q1, q3, #2     \n"
"vmla.f32   q14, q10, d14[0]    \n"
"vmla.f32   q15, q11, d14[1]    \n"
"vext.32    q12, q0, q2, #3     \n"
"vmla.f32   q13, q12, d15[0]    \n"
"pld        [%9, #256]          \n"
"vld1.f32   {d8-d11}, [%9]      \n"// q4 q5 = k28293031 k32333435
"add        %9, #28             \n"
"pld        [%6, #512]          \n"
"vld2.f32   {d0-d3}, [%6]!      \n"
"vmla.f32   q14, q0, d8[0]      \n"
"vmla.f32   q15, q1, d8[1]      \n"
"vld2.f32   {d4-d7}, [%6]       \n"
"vext.32    q8, q0, q2, #1      \n"
"vext.32    q9, q1, q3, #1      \n"
"vmla.f32   q13, q8, d9[0]      \n"
"vmla.f32   q14, q9, d9[1]      \n"
"vext.32    q10, q0, q2, #2     \n"
"vext.32    q11, q1, q3, #2     \n"
"vmla.f32   q15, q10, d10[0]    \n"
"vmla.f32   q13, q11, d10[1]    \n"
"vext.32    q12, q0, q2, #3     \n"
"vmla.f32   q14, q12, d11[0]    \n"
"pld        [%9, #256]          \n"
"vld1.f32   {d12-d15}, [%9]     \n"// q6 q7 = k35363738 k39404142
"add        %9, #28             \n"
"pld        [%7, #512]          \n"
"vld2.f32   {d0-d3}, [%7]!      \n"
"vmla.f32   q15, q0, d12[0]     \n"
"vmla.f32   q13, q1, d12[1]     \n"
"vld2.f32   {d4-d7}, [%7]       \n"
"vext.32    q8, q0, q2, #1      \n"
"vext.32    q9, q1, q3, #1      \n"
"vmla.f32   q14, q8, d13[0]     \n"
"vmla.f32   q15, q9, d13[1]     \n"
"vext.32    q10, q0, q2, #2     \n"
"vext.32    q11, q1, q3, #2     \n"
"vmla.f32   q13, q10, d14[0]    \n"
"vmla.f32   q14, q11, d14[1]    \n"
"vext.32    q12, q0, q2, #3     \n"
"vmla.f32   q15, q12, d15[0]    \n"
"pld        [%9, #256]          \n"
"vld1.f32   {d8-d11}, [%9]      \n"// q4 q5 = k42434445 k46474849
"sub        %9, #168            \n"// restore k0
"pld        [%8, #512]          \n"
"vld2.f32   {d0-d3}, [%8]!      \n"
"vmla.f32   q13, q0, d8[0]      \n"
"vmla.f32   q14, q1, d8[1]      \n"
"vld2.f32   {d4-d7}, [%8]       \n"
"vext.32    q8, q0, q2, #1      \n"
"vext.32    q9, q1, q3, #1      \n"
"vmla.f32   q15, q8, d9[0]      \n"
"vmla.f32   q13, q9, d9[1]      \n"
"vext.32    q10, q0, q2, #2     \n"
"vext.32    q11, q1, q3, #2     \n"
"vmla.f32   q14, q10, d10[0]    \n"
"vmla.f32   q15, q11, d10[1]    \n"
"vext.32    q12, q0, q2, #3     \n"
"vmla.f32   q13, q12, d11[0]    \n"
"vadd.f32   q14, q14, q15       \n"
"vadd.f32   q13, q13, q14       \n"
"vst1.f32   {d26-d27}, [%1]!    \n"
"subs       %0, #1              \n"
"bne        0b                  \n"
: "=r"(nn),     // %0
"=r"(outptr), // %1
"=r"(r0),     // %2
"=r"(r1),     // %3
"=r"(r2),     // %4
"=r"(r3),     // %5
"=r"(r4),     // %6
"=r"(r5),     // %7
"=r"(r6),     // %8
"=r"(k0)      // %9
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"8"(r6),
"9"(k0)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// Scalar tail: one output pixel per iteration, full 7x7 dot product.
for (; remain>0; remain--)
{
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r0[5] * k0[5];
sum += r0[6] * k0[6];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r1[5] * k1[5];
sum += r1[6] * k1[6];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r2[5] * k2[5];
sum += r2[6] * k2[6];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r3[5] * k3[5];
sum += r3[6] * k3[6];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
sum += r4[5] * k4[5];
sum += r4[6] * k4[6];
sum += r5[0] * k5[0];
sum += r5[1] * k5[1];
sum += r5[2] * k5[2];
sum += r5[3] * k5[3];
sum += r5[4] * k5[4];
sum += r5[5] * k5[5];
sum += r5[6] * k5[6];
sum += r6[0] * k6[0];
sum += r6[1] * k6[1];
sum += r6[2] * k6[2];
sum += r6[3] * k6[3];
sum += r6[4] * k6[4];
sum += r6[5] * k6[5];
sum += r6[6] * k6[6];
*outptr += sum;
// Horizontal stride 2.
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
r4 += 2;
r5 += 2;
r6 += 2;
outptr++;
}
// Advance to the next pair of input rows (vertical stride 2).
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
r5 += tailstep;
r6 += tailstep;
}
}
}
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
///
/// Invalid when every pointer seen so far was annotated.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
/// NOTE(review): the encoding of this value is defined by the code that
/// populates it (not visible here) — confirm against the consumers.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
///
/// Lookups are expected to cluster by file, so a one-entry most-recently-used
/// cache sits in front of the underlying map.
class FileNullabilityMap {
/// The backing store: per-file nullability records.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Fast path: the requested file is the one currently cached.
if (Cache.File == file)
return Cache.Nullability;
// Cache miss. If the cache holds a real entry, write it back to the map
// before evicting it.
if (!Cache.File.isInvalid())
Map[Cache.File] = Cache.Nullability;
// Load (or default-construct) the requested entry into the cache and
// hand out a reference to the cached copy.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
/// Tracks expected type during expression parsing, for use in code completion.
/// The type is tied to a particular token, all functions that update or consume
/// the type take a start location of the token they are looking at as a
/// parameter. This avoids updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder(bool Enabled) : Enabled(Enabled) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Handles e.g. BaseType{ .D = Tok...
void enterDesignatedInitializer(SourceLocation Tok, QualType BaseType,
const Designation &D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function, as it stores a
/// function_ref, clients should make sure all calls to get() with the same
/// location happen while function_ref is alive.
///
/// The callback should also emit signature help as a side-effect, but only
/// if the completion point has been reached.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
/// Get the expected type associated with this location, if any.
///
/// If the location is a function argument, determining the expected type
/// involves considering all function overloads and the arguments so far.
/// In this case, signature help for these function overloads will be reported
/// as a side-effect (only if the completion point has been reached).
QualType get(SourceLocation Tok) const {
// Only answer for the exact token the type was recorded against, and only
// when the builder is active; everything else yields "no expected type".
if (Enabled && Tok == ExpectedLoc) {
// An eagerly-stored type wins over the lazy computation.
if (!Type.isNull())
return Type;
// Fall back to the deferred computation (may run overload resolution
// and emit signature help as a side-effect).
if (ComputeType)
return ComputeType();
}
return QualType();
}
private:
bool Enabled;
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
/// The maximum alignment, same as in llvm::Value. We duplicate them here
/// because that allows us not to duplicate the constants in clang code,
/// which we must to since we can't directly use the llvm constants.
/// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 29;
static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions CurFPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
// #pragma pack and align.
class AlignPackInfo {
public:
  // `Native` represents default align mode, which may vary based on the
  // platform.
  enum Mode : unsigned char { Native, Natural, Packed, Mac68k };

  // #pragma pack info constructor
  AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL)
      : PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) {
    assert(Num == PackNumber && "The pack number has been truncated.");
  }

  // #pragma align info constructor
  AlignPackInfo(AlignPackInfo::Mode M, bool IsXL)
      : PackAttr(false), AlignMode(M),
        PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {}

  explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {}

  AlignPackInfo() : AlignPackInfo(Native, false) {}

  // When an AlignPackInfo itself cannot be used, this returns a 32-bit
  // integer encoding for it. This should only be passed to
  // AlignPackInfo::getFromRawEncoding, it should not be inspected directly.
  //
  // Layout: bit 0 = XL-stack flag, bits 1-2 = align mode, bit 3 = pack
  // attribute flag, bits 4-8 = pack number.
  static uint32_t getRawEncoding(const AlignPackInfo &Info) {
    std::uint32_t Encoding{};
    if (Info.IsXLStack())
      Encoding |= IsXLMask;
    Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1;
    if (Info.IsPackAttr())
      Encoding |= PackAttrMask;
    Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4;
    return Encoding;
  }

  static AlignPackInfo getFromRawEncoding(unsigned Encoding) {
    bool IsXL = static_cast<bool>(Encoding & IsXLMask);
    AlignPackInfo::Mode M =
        static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1);
    int PackNumber = (Encoding & PackNumMask) >> 4;
    if (Encoding & PackAttrMask)
      return AlignPackInfo(M, PackNumber, IsXL);
    return AlignPackInfo(M, IsXL);
  }

  bool IsPackAttr() const { return PackAttr; }
  bool IsAlignAttr() const { return !PackAttr; }
  Mode getAlignMode() const { return AlignMode; }
  unsigned getPackNumber() const { return PackNumber; }
  bool IsPackSet() const {
    // #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack
    // attribute on a decl.
    return PackNumber != UninitPackVal && PackNumber != 0;
  }
  bool IsXLStack() const { return XLStack; }

  bool operator==(const AlignPackInfo &Info) const {
    return std::tie(AlignMode, PackNumber, PackAttr, XLStack) ==
           std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr,
                    Info.XLStack);
  }
  bool operator!=(const AlignPackInfo &Info) const {
    return !(*this == Info);
  }

private:
  /// \brief True if this is a pragma pack attribute,
  ///        not a pragma align attribute.
  bool PackAttr;

  /// \brief The alignment mode that is in effect.
  Mode AlignMode;

  /// \brief The pack number of the stack.
  unsigned char PackNumber;

  /// \brief True if it is a XL #pragma align/pack stack.
  bool XLStack;

  /// \brief Uninitialized pack value.
  static constexpr unsigned char UninitPackVal = -1;

  // Masks to encode and decode an AlignPackInfo. PackNumMask covers five
  // bits, so only pack numbers up to 31 survive a round trip through the
  // raw encoding. NOTE(review): assumed larger pack values never occur
  // for #pragma pack -- confirm against the pragma handlers.
  static constexpr uint32_t IsXLMask{0x0000'0001};
  static constexpr uint32_t AlignModeMask{0x0000'0006};
  static constexpr uint32_t PackAttrMask{0x0000'0008};
  static constexpr uint32_t PackNumMask{0x0000'01F0};
};
/// A push/pop stack for one kind of MS-style pragma state (pack, align,
/// vtordisp, segment, FP pragmas). Each Slot records the value that was
/// current when a push happened so a later pop can restore it.
template<typename ValueType>
struct PragmaStack {
  /// One saved stack entry: the (optionally labeled) value current at the
  /// time of the push, plus source locations kept for diagnostics.
  struct Slot {
    llvm::StringRef StackSlotLabel;
    ValueType Value;
    SourceLocation PragmaLocation;
    SourceLocation PragmaPushLocation;
    Slot(llvm::StringRef StackSlotLabel, ValueType Value,
         SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
        : StackSlotLabel(StackSlotLabel), Value(Value),
          PragmaLocation(PragmaLocation),
          PragmaPushLocation(PragmaPushLocation) {}
  };

  /// Apply one pragma directive to the stack.
  ///
  /// PSK_Reset restores DefaultValue; PSK_Push saves the current value
  /// (under an optional label); PSK_Pop restores a saved value -- the one
  /// under a matching label, or the most recent entry when no label is
  /// given; a PSK_Set bit (alone or combined with push/pop) then installs
  /// \p Value as the new current value.
  void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action,
           llvm::StringRef StackSlotLabel, ValueType Value) {
    if (Action == PSK_Reset) {
      CurrentValue = DefaultValue;
      CurrentPragmaLocation = PragmaLocation;
      return;
    }
    if (Action & PSK_Push)
      Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation,
                         PragmaLocation);
    else if (Action & PSK_Pop) {
      if (!StackSlotLabel.empty()) {
        // If we've got a label, try to find it and jump there.
        auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
          return x.StackSlotLabel == StackSlotLabel;
        });
        // If we found the label, pop from there, discarding everything
        // pushed after it. A pop with an unmatched label is a no-op.
        if (I != Stack.rend()) {
          CurrentValue = I->Value;
          CurrentPragmaLocation = I->PragmaLocation;
          // I is a reverse iterator: std::prev(I.base()) is the forward
          // iterator addressing the same slot.
          Stack.erase(std::prev(I.base()), Stack.end());
        }
      } else if (!Stack.empty()) {
        // We do not have a label, just pop the last entry.
        CurrentValue = Stack.back().Value;
        CurrentPragmaLocation = Stack.back().PragmaLocation;
        Stack.pop_back();
      }
    }
    if (Action & PSK_Set) {
      CurrentValue = Value;
      CurrentPragmaLocation = PragmaLocation;
    }
  }

  // MSVC seems to add artificial slots to #pragma stacks on entering a C++
  // method body to restore the stacks on exit, so it works like this:
  //
  //   struct S {
  //     #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
  //     void Method {}
  //     #pragma <name>(pop, InternalPragmaSlot)
  //   };
  //
  // It works even with #pragma vtordisp, although MSVC doesn't support
  //   #pragma vtordisp(push [, id], n)
  // syntax.
  //
  // Push / pop a named sentinel slot.
  void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
    assert((Action == PSK_Push || Action == PSK_Pop) &&
           "Can only push / pop #pragma stack sentinels!");
    Act(CurrentPragmaLocation, Action, Label, CurrentValue);
  }

  // Constructors.
  explicit PragmaStack(const ValueType &Default)
      : DefaultValue(Default), CurrentValue(Default) {}

  // True when the current value differs from the default.
  bool hasValue() const { return CurrentValue != DefaultValue; }

  SmallVector<Slot, 2> Stack;
  ValueType DefaultValue; // Value used for PSK_Reset action.
  ValueType CurrentValue;
  SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
PragmaStack<AlignPackInfo> AlignPackStack;
// The current #pragma align/pack values and locations at each #include.
struct AlignPackIncludeState {
AlignPackInfo CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// This stack tracks the current state of Sema.CurFPFeatures.
PragmaStack<FPOptionsOverride> FpPragmaStack;
/// The floating-point option overrides currently in effect from FP
/// pragmas: the top of FpPragmaStack when it holds a non-default value,
/// otherwise an empty override set.
FPOptionsOverride CurFPFeatureOverrides() {
  return FpPragmaStack.hasValue() ? FpPragmaStack.CurrentValue
                                  : FPOptionsOverride();
}
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
  // NOTE(review): definitions live elsewhere; presumably ShouldAct gates
  // whether the sentinel push (ctor) / pop (dtor) is performed -- confirm.
  PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
  ~PragmaStackSentinelRAII();

private:
  Sema &S;
  // Label identifying the sentinel slot on each #pragma stack.
  StringRef SlotLabel;
  bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression.
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>,
llvm::SmallPtrSet<Expr *, 4>>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// The index of the first FunctionScope that corresponds to the current
/// context.
unsigned FunctionScopesStart = 0;
/// The function scopes belonging to the current context only, skipping
/// any scopes saved from enclosing contexts.
ArrayRef<sema::FunctionScopeInfo *> getFunctionScopes() const {
  return llvm::makeArrayRef(FunctionScopes).slice(FunctionScopesStart);
}
/// Stack containing information needed when in C++2a an 'auto' is encountered
/// in a function declaration parameter type specifier in order to invent a
/// corresponding template parameter in the enclosing abbreviated function
/// template. This information is also present in LambdaScopeInfo, stored in
/// the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
/// The index of the first InventedParameterInfo that refers to the current
/// context.
unsigned InventedParameterInfosStart = 0;
/// The invented-template-parameter infos belonging to the current
/// context only, skipping entries saved from enclosing contexts.
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
  return llvm::makeArrayRef(InventedParameterInfos)
      .slice(InventedParameterInfosStart);
}
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
/// Register the parser callbacks used to parse late-parsed templated
/// functions on demand, along with the opaque parser object that is
/// handed back to those callbacks.
void SetLateTemplateParser(LateTemplateParserCB *LTP,
                           LateTemplateParserCleanupCB *LTPCleanup,
                           void *P) {
  LateTemplateParser = LTP;
  LateTemplateParserCleanup = LTPCleanup;
  OpaqueParser = P;
}
// Does the work necessary to deal with a SYCL kernel lambda. At the moment,
// this just marks the list of lambdas required to name the kernel.
void AddSYCLKernelLambda(const FunctionDecl *FD);
class DelayedDiagnostics;
/// Opaque saved state for DelayedDiagnostics: the pool that was current
/// before a push/pushUndelayed, restored by the matching pop.
class DelayedDiagnosticsState {
  // Written by push/pushUndelayed before any read; only the friend
  // class below may access it.
  sema::DelayedDiagnosticPool *SavedPool;
  friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
  /// The pool that currently receives delayed diagnostics; null when
  /// diagnostics are not being delayed.
  sema::DelayedDiagnosticPool *CurPool = nullptr;

public:
  DelayedDiagnostics() = default;

  /// Adds a delayed diagnostic.
  void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h

  /// Determines whether diagnostics should be delayed.
  bool shouldDelayDiagnostics() { return CurPool != nullptr; }

  /// Returns the current delayed-diagnostics pool.
  sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; }

  /// Enter a new scope. Access and deprecation diagnostics will be
  /// collected in this pool.
  DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
    DelayedDiagnosticsState Saved;
    Saved.SavedPool = CurPool;
    CurPool = &pool;
    return Saved;
  }

  /// Leave a delayed-diagnostic state that was previously pushed.
  /// Do not emit any of the diagnostics. This is performed as part
  /// of the bookkeeping of popping a pool "properly".
  void popWithoutEmitting(DelayedDiagnosticsState state) {
    CurPool = state.SavedPool;
  }

  /// Enter a new scope where access and deprecation diagnostics are
  /// not delayed.
  DelayedDiagnosticsState pushUndelayed() {
    DelayedDiagnosticsState Saved;
    Saved.SavedPool = CurPool;
    CurPool = nullptr;
    return Saved;
  }

  /// Undo a previous pushUndelayed().
  void popUndelayed(DelayedDiagnosticsState state) {
    assert(CurPool == nullptr);
    CurPool = state.SavedPool;
  }
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
  Sema &S;
  // Context to restore on pop(); nulled out after an explicit pop() so
  // the destructor does not restore a second time.
  DeclContext *SavedContext;
  ProcessingContextState SavedContextState;
  QualType SavedCXXThisTypeOverride;
  unsigned SavedFunctionScopesStart;
  unsigned SavedInventedParameterInfosStart;

public:
  /// Make \p ContextToPush the current DeclContext, suspending delayed
  /// diagnostics for the duration and, unless \p NewThisContext is false,
  /// clearing any CXXThisTypeOverride. Function-scope and invented-
  /// parameter start indices are advanced so earlier entries are not
  /// attributed to the new context.
  ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
      : S(S), SavedContext(S.CurContext),
        SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
        SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
        SavedFunctionScopesStart(S.FunctionScopesStart),
        SavedInventedParameterInfosStart(S.InventedParameterInfosStart)
  {
    assert(ContextToPush && "pushing null context");
    S.CurContext = ContextToPush;
    if (NewThisContext)
      S.CXXThisTypeOverride = QualType();
    // Any saved FunctionScopes do not refer to this context.
    S.FunctionScopesStart = S.FunctionScopes.size();
    S.InventedParameterInfosStart = S.InventedParameterInfos.size();
  }

  /// Restore all saved state. Idempotent: only the first call (whether
  /// explicit or from the destructor) has an effect.
  void pop() {
    if (!SavedContext) return;
    S.CurContext = SavedContext;
    S.DelayedDiagnostics.popUndelayed(SavedContextState);
    S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
    S.FunctionScopesStart = SavedFunctionScopesStart;
    S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
    SavedContext = nullptr;
  }

  ~ContextRAII() {
    pop();
  }
};
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
/// True when the innermost expression evaluation context is constant-
/// evaluated, or when isConstantEvaluatedOverride forces that state.
bool isConstantEvaluated() {
  if (ExprEvalContexts.back().isConstantEvaluated())
    return true;
  return isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
  Sema &S;
  // Restores the previous DeclContext when this scope is destroyed.
  Sema::ContextRAII SavedContext;
  // Set by addContextNote(); tells the destructor whether a code
  // synthesis context must also be popped.
  bool PushedCodeSynthesisContext = false;

public:
  /// Enter \p DC and push a function scope plus a PotentiallyEvaluated
  /// expression evaluation context. \p DC must be a FunctionDecl (which
  /// is marked as about to receive a body) or an ObjCMethodDecl.
  SynthesizedFunctionScope(Sema &S, DeclContext *DC)
      : S(S), SavedContext(S, DC) {
    S.PushFunctionScope();
    S.PushExpressionEvaluationContext(
        Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
    if (auto *FD = dyn_cast<FunctionDecl>(DC))
      FD->setWillHaveBody(true);
    else
      assert(isa<ObjCMethodDecl>(DC));
  }

  /// Push a DefiningSynthesizedFunction note for diagnostics, pointing
  /// at \p UseLoc. May be called at most once per scope (asserted).
  void addContextNote(SourceLocation UseLoc) {
    assert(!PushedCodeSynthesisContext);
    Sema::CodeSynthesisContext Ctx;
    Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
    Ctx.PointOfInstantiation = UseLoc;
    Ctx.Entity = cast<Decl>(S.CurContext);
    S.pushCodeSynthesisContext(Ctx);
    PushedCodeSynthesisContext = true;
  }

  /// Undo everything the constructor (and addContextNote) set up, in
  /// reverse order, clearing the will-have-body flag.
  ~SynthesizedFunctionScope() {
    if (PushedCodeSynthesisContext)
      S.popCodeSynthesisContext();
    if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
      FD->setWillHaveBody(false);
    S.PopExpressionEvaluationContext();
    S.PopFunctionScopeInfo();
  }
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
  /// The expression evaluation context.
  ExpressionEvaluationContext Context;

  /// Whether the enclosing context needed a cleanup.
  CleanupInfo ParentCleanup;

  /// The number of active cleanup objects when we entered
  /// this expression evaluation context.
  unsigned NumCleanupObjects;

  /// The number of typos encountered during this expression evaluation
  /// context (i.e. the number of TypoExprs created).
  unsigned NumTypos;

  // Expressions whose odr-use status is still undecided in this context.
  MaybeODRUseExprSet SavedMaybeODRUseExprs;

  /// The lambdas that are present within this context, if it
  /// is indeed an unevaluated context.
  SmallVector<LambdaExpr *, 2> Lambdas;

  /// The declaration that provides context for lambda expressions
  /// and block literals if the normal declaration context does not
  /// suffice, e.g., in a default function argument.
  Decl *ManglingContextDecl;

  /// If we are processing a decltype type, a set of call expressions
  /// for which we have deferred checking the completeness of the return type.
  SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

  /// If we are processing a decltype type, a set of temporary binding
  /// expressions for which we have deferred checking the destructor.
  SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

  // Expressions with possible null-pointer dereferences recorded in
  // this context.
  llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

  /// Expressions appearing as the LHS of a volatile assignment in this
  /// context. We produce a warning for these when popping the context if
  /// they are not discarded-value expressions nor unevaluated operands.
  SmallVector<Expr*, 2> VolatileAssignmentLHSs;

  /// Set of candidates for starting an immediate invocation.
  llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;

  /// Set of DeclRefExprs referencing a consteval function when used in a
  /// context not already known to be immediately invoked.
  llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;

  /// \brief Describes whether we are in an expression context which we have
  /// to handle differently.
  enum ExpressionKind {
    EK_Decltype, EK_TemplateArgument, EK_Other
  } ExprContext;

  // Note: NumTypos starts at zero; SavedMaybeODRUseExprs and the various
  // containers start empty and are filled while the context is active.
  ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                    unsigned NumCleanupObjects,
                                    CleanupInfo ParentCleanup,
                                    Decl *ManglingContextDecl,
                                    ExpressionKind ExprContext)
      : Context(Context), ParentCleanup(ParentCleanup),
        NumCleanupObjects(NumCleanupObjects), NumTypos(0),
        ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}

  // True for any of the three unevaluated-operand context kinds.
  bool isUnevaluated() const {
    return Context == ExpressionEvaluationContext::Unevaluated ||
           Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
           Context == ExpressionEvaluationContext::UnevaluatedList;
  }

  bool isConstantEvaluated() const {
    return Context == ExpressionEvaluationContext::ConstantEvaluated;
  }
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
/// The outcome of overload resolution for a special member.
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
// Resolved method (may be null) packed with a 2-bit Kind tag.
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
// Note: a deleted method is classified as NoMemberOrDeleted, not Success.
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
/// A SpecialMemberOverloadResult that can live in a FoldingSet, keyed by a
/// precomputed FoldingSetNodeID (see SpecialMemberCache below).
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the CurFPFeatures state on entry/exit of compound
/// statements.
class FPFeaturesStateRAII {
public:
FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) {
OldOverrides = S.FpPragmaStack.CurrentValue;
}
~FPFeaturesStateRAII() {
S.CurFPFeatures = OldFPFeaturesState;
S.FpPragmaStack.CurrentValue = OldOverrides;
}
FPOptionsOverride getOverrides() { return OldOverrides; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
FPOptionsOverride OldOverrides;
};
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
/// Increment when we find a reference; decrement when we find an ignored
/// assignment. Ultimately the value is 0 if every reference is an ignored
/// assignment.
llvm::DenseMap<const VarDecl *, int> RefsMinusAssignments;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
/// This virtual key function only exists to limit the emission of debug info
/// describing the Sema class. GCC and Clang only emit debug info for a class
/// with a vtable when the vtable is emitted. Sema is final and not
/// polymorphic, but the debug info size savings are so significant that it is
/// worth adding a vtable just to take advantage of this optimization.
virtual void anchor();
// Accessors for the objects Sema was constructed with or owns.
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getCurFPFeatures() { return CurFPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
// May be null when no external sema source has been registered.
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. ImmediateDiagBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class ImmediateDiagBuilder : public DiagnosticBuilder {
Sema &SemaRef;
// Diagnostic ID forwarded to Sema::EmitCurrentDiagnostic on destruction.
unsigned DiagID;
public:
ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~ImmediateDiagBuilder is a safe no-op
// in that case anyway.
ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default;
~ImmediateDiagBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First clear the diagnostic
// builder itself so it won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template <typename T>
friend const ImmediateDiagBuilder &
operator<<(const ImmediateDiagBuilder &Diag, const T &Value) {
// Stream through the DiagnosticBuilder base, then return the derived
// reference so further chained << calls keep the correct type.
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
// It is necessary to limit this to rvalue reference to avoid calling this
// function with a bitfield lvalue argument since non-const reference to
// bitfield is not allowed.
template <typename T, typename = typename std::enable_if<
!std::is_lvalue_reference<T>::value>::type>
const ImmediateDiagBuilder &operator<<(T &&V) const {
const DiagnosticBuilder &BaseDiag = *this;
BaseDiag << std::move(V);
return *this;
}
};
/// A generic diagnostic builder for errors which may or may not be deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class SemaDiagnosticBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D);
SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;
~SemaDiagnosticBuilder();
/// True when this builder wraps an immediate (non-deferred) diagnostic.
bool isImmediate() const { return ImmediateDiag.hasValue(); }
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (SemaDiagnosticBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a SemaDiagnosticBuilder yourself.
operator bool() const { return isImmediate(); }
template <typename T>
friend const SemaDiagnosticBuilder &
operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) {
// Route the value either to the live immediate builder or to the
// deferred partial diagnostic stored on the function's entry.
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
// It is necessary to limit this to rvalue reference to avoid calling this
// function with a bitfield lvalue argument since non-const reference to
// bitfield is not allowed.
template <typename T, typename = typename std::enable_if<
!std::is_lvalue_reference<T>::value>::type>
const SemaDiagnosticBuilder &operator<<(T &&V) const {
if (ImmediateDiag.hasValue())
*ImmediateDiag << std::move(V);
else if (PartialDiagId.hasValue())
S.DeviceDeferredDiags[Fn][*PartialDiagId].second << std::move(V);
return *this;
}
// Streaming a PartialDiagnostic either emits it into the immediate
// builder or replaces the stored deferred diagnostic wholesale.
friend const SemaDiagnosticBuilder &
operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) {
if (Diag.ImmediateDiag.hasValue())
PD.Emit(*Diag.ImmediateDiag);
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second = PD;
return Diag;
}
/// Attach a fix-it hint to whichever diagnostic (immediate or deferred)
/// this builder is producing.
void AddFixItHint(const FixItHint &Hint) const {
if (ImmediateDiag.hasValue())
ImmediateDiag->AddFixItHint(Hint);
else if (PartialDiagId.hasValue())
S.DeviceDeferredDiags[Fn][*PartialDiagId].second.AddFixItHint(Hint);
}
// Allow `return ExprError(Diag(...) << ...);`-style usage.
friend ExprResult ExprError(const SemaDiagnosticBuilder &) {
return ExprError();
}
friend StmtResult StmtError(const SemaDiagnosticBuilder &) {
return StmtError();
}
// Implicit conversions to the various error results, so a builder can be
// returned directly from Act/Build methods.
operator ExprResult() const { return ExprError(); }
operator StmtResult() const { return StmtError(); }
operator TypeResult() const { return TypeError(); }
operator DeclResult() const { return DeclResult(true); }
operator MemInitResult() const { return MemInitResult(true); }
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
// Function the deferred diagnostic is attached to (for K_Deferred).
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<ImmediateDiagBuilder> ImmediateDiag;
// Index into DeviceDeferredDiags[Fn] for the stored partial diagnostic.
llvm::Optional<unsigned> PartialDiagId;
};
/// Is the last error level diagnostic immediate. This is used to determined
/// whether the next info diagnostic should be immediate.
bool IsLastErrorImmediate = true;
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID,
bool DeferHint = false);
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD,
bool DeferHint = false);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
/// Whether uncompilable error has occurred. This includes error happens
/// in deferred diagnostics.
bool hasUncompilableErrorOccurred() const;
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned Index);
void emitAndClearUnusedLocalTypedefWarnings();
private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
llvm::SmallSetVector<Decl *, 4> DeclsToCheckForDeferredDiags;
public:
// Emit all deferred diagnostics.
void emitDeferredDiags();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
// Sema that owns the scope cache; used by operator() to recycle scopes.
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
/// Deleter invoked by PoppedFunctionScopePtr; defined out of line.
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
/// Return the innermost function scope being analyzed, or null when no
/// function scope is currently active.
sema::FunctionScopeInfo *getCurFunction() const {
  if (FunctionScopes.empty())
    return nullptr;
  return FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void setFunctionHasMustTail();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// Retrieve the current function, if any, that should be analyzed for
/// potential availability violations.
sema::FunctionScopeInfo *getCurFunctionAvailabilityContext();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
/// Determine whether the callee of a particular function call can throw.
/// E, D and Loc are all optional.
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
SourceLocation Loc = SourceLocation());
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
/// Abstract callback used to emit a diagnostic when a type is found to be
/// incomplete (see RequireCompleteType and related entry points).
///
/// The empty user-provided constructor/destructor bodies carried no
/// behavior, so they are spelled `= default`, matching the defaulted
/// special members used elsewhere in this file.
struct TypeDiagnoser {
TypeDiagnoser() = default;
/// Emit the diagnostic for the offending type \p T at \p Loc.
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() = default;
};
// getPrintable: normalize a value into a form that can be streamed into a
// diagnostic builder. Used by BoundTypeDiagnoser below to forward an
// arbitrary argument pack into a SemaDiagnosticBuilder.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
// A lone SourceLocation is printed as a SourceRange at that location.
static SourceRange getPrintable(SourceLocation L) { return L; }
// Expressions and TypeLocs print as their covering source range.
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
/// TypeDiagnoser that binds a diagnostic ID plus a pack of extra arguments
/// at construction time; diagnose() streams those arguments (in order)
/// followed by the offending type.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
unsigned DiagID;
// References to the caller's arguments; the caller must keep them alive
// for the lifetime of the diagnoser (they are used from diagnose()).
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
// Stream the bound arguments first, then the type itself.
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
/// Do a check to make sure \p Name looks like a legal argument for the
/// swift_name attribute applied to decl \p D. Raise a diagnostic if the name
/// is invalid for the given declaration.
///
/// \p AL is used to provide caret diagnostics in case of a malformed name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc,
const ParsedAttr &AL, bool IsAsync);
/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
: BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
this->emit(DB, std::index_sequence_for<Ts...>());
// Unlike the base class, prefix the type with a 0/1 sizeless selector.
DB << T->isSizelessType() << T;
}
};
enum class CompleteTypeKind {
/// Apply the normal rules for complete types. In particular,
/// treat all sizeless types as incomplete.
Normal,
/// Relax the normal rules for complete types so that they include
/// sizeless built-in types.
AcceptSizeless,
// FIXME: Eventually we should flip the default to Normal and opt in
// to AcceptSizeless rather than opt out of it.
Default = AcceptSizeless
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);
/// State for one module whose scope we are currently inside; see
/// ModuleScopes below.
struct ModuleScope {
// Location where this module scope began.
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
// True for a module interface unit (as opposed to an implementation).
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
// Visibility state saved on entry, to restore when this scope ends.
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within, or null when we are
/// not inside any module scope.
Module *getCurrentModule() const {
  if (ModuleScopes.empty())
    return nullptr;
  return ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
return Entity->getOwningModule();
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
// When loading a non-modular PCH files, this is used to restore module
// visibility.
void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) {
VisibleModules.setVisible(Mod, ImportLoc);
}
/// Determine whether a declaration is visible to name lookup. The common
/// case is answered by the cached flag; otherwise fall back to the slow path.
bool isVisible(const NamedDecl *D) {
  if (D->isUnconditionallyVisible())
    return true;
  return isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
// Out-of-line slow path for hasVisibleDeclaration above.
bool hasVisibleDeclarationSlow(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                          bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
                          llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
                                            const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
    SourceLocation Loc, const NamedDecl *D,
    ArrayRef<const NamedDecl *> Equiv);
/// Determine whether \p FD is a "usual deallocation function" in the sense
/// the name suggests (C++ [basic.stc.dynamic.deallocation]).
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
/// Query whether \p T is a complete type, without emitting diagnostics
/// (the implementation is invoked with a null diagnoser).
bool isCompleteType(SourceLocation Loc, QualType T,
                    CompleteTypeKind Kind = CompleteTypeKind::Default) {
  bool Incomplete = RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
  return !Incomplete;
}
// Require that \p T be a complete type, diagnosing via \p Diagnoser or
// \p DiagID if it is not.
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         CompleteTypeKind Kind, unsigned DiagID);
// Convenience overloads defaulting to CompleteTypeKind::Default.
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         TypeDiagnoser &Diagnoser) {
  return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
  return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}
// Variadic form: the extra arguments are bound into the diagnostic.
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                         const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, Diagnoser);
}
// As above, but uses CompleteTypeKind::Normal with a SizelessTypeDiagnoser
// (for "complete and sized" requirements).
template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
                              const Ts &... Args) {
  SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}
/// Get the type of expression E, triggering instantiation to complete the
/// type if necessary -- that is, if the expression refers to a templated
/// static data member of incomplete array type.
///
/// May still return an incomplete type if instantiation was not possible or
/// if the type is incomplete for a different reason. Use
/// RequireCompleteExprType instead if a diagnostic is expected for an
/// incomplete expression type.
QualType getCompletedType(Expr *E);
void completeExprArrayBound(Expr *E);
// Require that the type of expression \p E be complete, diagnosing otherwise.
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
                             TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
// Variadic form: the extra arguments are bound into the diagnostic.
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
// As above, but uses CompleteTypeKind::Normal with a SizelessTypeDiagnoser.
template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
                                  const Ts &... Args) {
  SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}
// Require that \p T be a literal type, diagnosing otherwise.
bool RequireLiteralType(SourceLocation Loc, QualType T,
                        TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                        const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                           const CXXScopeSpec &SS, QualType T,
                           TagDecl *OwnedTagDecl = nullptr);
QualType getDecltypeForParenthesizedExpr(Expr *E);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
                           bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
                                 UnaryTransformType::UTTKind UKind,
                                 SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
/// Information about whether (and how) a function body being parsed can be
/// skipped; passed to and filled in by the ActOnStartOfFunctionDef /
/// CheckForFunctionRedefinition family below.
struct SkipBodyInfo {
  SkipBodyInfo() = default;
  // Whether the body should be skipped.
  bool ShouldSkip = false;
  // Whether the new definition must first be checked against \c Previous.
  bool CheckSameAsPrevious = false;
  // The previously-seen declaration, when one is relevant.
  NamedDecl *Previous = nullptr;
  // The new declaration, when one is relevant.
  NamedDecl *New = nullptr;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
// Look up \p II as a type name in scope \p S; the many flags refine how the
// lookup behaves in specific parsing contexts.
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
                       Scope *S, CXXScopeSpec *SS = nullptr,
                       bool isClassName = false, bool HasTrailingDot = false,
                       ParsedType ObjectType = nullptr,
                       bool IsCtorOrDtorName = false,
                       bool WantNontrivialTypeSourceInfo = false,
                       bool IsClassTemplateDeductionContext = true,
                       IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
                             SourceLocation IILoc,
                             Scope *S,
                             CXXScopeSpec *SS,
                             ParsedType &SuggestedType,
                             bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
                                    SourceLocation NameLoc,
                                    bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName(). The payload associated with each kind is carried
/// by the NameClassification class below.
enum NameClassificationKind {
  /// This name is not a type or template in this context, but might be
  /// something else.
  NC_Unknown,
  /// Classification failed; an error has been produced.
  NC_Error,
  /// The name has been typo-corrected to a keyword.
  NC_Keyword,
  /// The name was classified as a type.
  NC_Type,
  /// The name was classified as a specific non-type, non-template
  /// declaration. ActOnNameClassifiedAsNonType should be called to
  /// convert the declaration to an expression.
  NC_NonType,
  /// The name was classified as an ADL-only function name.
  /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
  /// result to an expression.
  NC_UndeclaredNonType,
  /// The name denotes a member of a dependent type that could not be
  /// resolved. ActOnNameClassifiedAsDependentNonType should be called to
  /// convert the result to an expression.
  NC_DependentNonType,
  /// The name was classified as an overload set, and an expression
  /// representing that overload set has been formed.
  /// ActOnNameClassifiedAsOverloadSet should be called to form a suitable
  /// expression referencing the overload set.
  NC_OverloadSet,
  /// The name was classified as a template whose specializations are types.
  NC_TypeTemplate,
  /// The name was classified as a variable template name.
  NC_VarTemplate,
  /// The name was classified as a function template name.
  NC_FunctionTemplate,
  /// The name was classified as an ADL-only function template name.
  NC_UndeclaredTemplate,
  /// The name was classified as a concept name.
  NC_Concept,
};
class NameClassification {
NameClassificationKind Kind;
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification OverloadSet(ExprResult E) {
NameClassification Result(NC_OverloadSet);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification Concept(TemplateName Name) {
NameClassification Result(NC_Concept);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_OverloadSet);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_Concept ||
Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_Concept:
return TNK_Concept_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
                                IdentifierInfo *&Name, SourceLocation NameLoc,
                                const Token &NextToken,
                                CorrectionCandidateCallback *CCC = nullptr);
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
                                                  SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
                                                 IdentifierInfo *Name,
                                                 SourceLocation NameLoc,
                                                 bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
                                        NamedDecl *Found,
                                        SourceLocation NameLoc,
                                        const Token &NextToken);
/// Act on the result of classifying a name as an overload set.
ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
  ClassTemplate,
  FunctionTemplate,
  VarTemplate,
  AliasTemplate,
  TemplateTemplateParam,
  Concept,
  DependentTemplate
};
// Classify \p Name into one of the diagnostic kinds above.
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name. \p Dependent is set when the answer depends on a
/// dependent construct.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
  if (!getLangOpts().CPlusPlus || E.isInvalid())
    return false;
  Dependent = false;
  const Expr *Ex = E.get();
  if (const auto *DRE = dyn_cast<DeclRefExpr>(Ex))
    return !DRE->hasExplicitTemplateArgs();
  if (const auto *ME = dyn_cast<MemberExpr>(Ex))
    return !ME->hasExplicitTemplateArgs();
  // Beyond this point the expression is dependent.
  Dependent = true;
  if (const auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(Ex))
    return !DSDRE->hasExplicitTemplateArgs();
  if (const auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(Ex))
    return !DSME->hasExplicitTemplateArgs();
  // Any additional cases recognized here should also be handled by
  // diagnoseExprIntendedAsTemplateName.
  return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
                                        SourceLocation Less,
                                        SourceLocation Greater);
void warnOnReservedIdentifier(const NamedDecl *D);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
                            MultiTemplateParamsArg TemplateParameterLists);
bool tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo,
                                     QualType &T, SourceLocation Loc,
                                     unsigned FailedFoldDiagID);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                  DeclarationName Name, SourceLocation Loc,
                                  bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
                          SourceLocation FallbackLoc,
                          SourceLocation ConstQualLoc = SourceLocation(),
                          SourceLocation VolatileQualLoc = SourceLocation(),
                          SourceLocation RestrictQualLoc = SourceLocation(),
                          SourceLocation AtomicQualLoc = SourceLocation(),
                          SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
// Overload set: find the declaration shadowed by \p D, if any.
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
                                  const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
NamedDecl *getShadowedDeclaration(const BindingDecl *D,
                                  const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
                 const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
                                  TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                  TypeSourceInfo *TInfo,
                                  LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
                                LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope,
                                   ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
                             MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
                                   Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
  /// Diagnose issues that are non-constant or that are extensions.
  Diagnose,
  /// Identify whether this function satisfies the formal rules for constexpr
  /// functions in the current language mode (with no extensions).
  CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
                                      CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
                              SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
                              SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
                              FunctionDecl *NewFD, LookupResult &Previous,
                              bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
                                    QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
                                                 bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
                                        SourceLocation Loc,
                                        QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
                            SourceLocation NameLoc, IdentifierInfo *Name,
                            QualType T, TypeSourceInfo *TSInfo,
                            StorageClass SC);
// Parser callbacks for a parameter's default argument (parsed, unparsed,
// and erroneous cases respectively).
void ActOnParamDefaultArgument(Decl *param,
                               SourceLocation EqualLoc,
                               Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
                                       SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
ExprResult ConvertParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                                       SourceLocation EqualLoc);
void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                             SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
  // Function parameter.
  NTCUC_FunctionParam,
  // Function return.
  NTCUC_FunctionReturn,
  // Default-initialized object.
  NTCUC_DefaultInitializedObject,
  // Variable with automatic storage duration.
  NTCUC_AutoVar,
  // Initializer expression that might copy from another object.
  NTCUC_CopyInit,
  // Assignment.
  NTCUC_Assignment,
  // Compound literal.
  NTCUC_CompoundLiteral,
  // Block capture.
  NTCUC_BlockCapture,
  // lvalue-to-rvalue conversion of volatile type.
  NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion; they are distinct bits
// and may be OR'd together into its NonTrivialKind argument.
enum NonTrivialCUnionKind {
  NTCUK_Init = 0x1,
  NTCUK_Destruct = 0x2,
  NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
                           NonTrivialCUnionContext UseContext,
                           unsigned NonTrivialKind);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
                                      IdentifierInfo *Ident,
                                      ParsedAttributes &Attrs,
                                      SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
                                       ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
                                     SourceLocation LocAfterDecls);
// \p SkipBody, when provided, is filled in when the (re)definition can be
// skipped (see SkipBodyInfo above).
void CheckForFunctionRedefinition(
    FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
    SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
                              MultiTemplateParamsArg TemplateParamLists,
                              SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
                              SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
ExprResult ActOnRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
/// Check whether \p D is a (non-null) Objective-C method declaration.
bool isObjCMethodDecl(Decl *D) {
  if (!D)
    return false;
  return isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
                                       QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
                            SourceLocation AsmLoc,
                            SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
                            SourceLocation SemiLoc);
enum class ModuleDeclKind {
  Interface, ///< 'export module X;'
  Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
                               SourceLocation ModuleLoc, ModuleDeclKind MDK,
                               ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
                                              SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
                             SourceLocation ExportLoc,
                             SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
                             SourceLocation ExportLoc,
                             SourceLocation ImportLoc, Module *M,
                             ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
  Declaration,
  Definition,
  DefaultArgument,
  ExplicitSpecialization,
  PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                           MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                           SourceLocation DeclLoc, ArrayRef<Module *> Modules,
                           MissingImportKind MIK, bool Recover);
// Parser callbacks for 'export { ... }' blocks.
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
                           SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
                            SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
  return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
                                        const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
// Parser callback for a decl-spec with no declarator (e.g. a bare
// 'struct S;'); \p AnonRecord receives an anonymous record, if any.
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                 RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                 MultiTemplateParamsArg TemplateParams,
                                 bool IsExplicitInstantiation,
                                 RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
                                  AccessSpecifier AS,
                                  RecordDecl *Record,
                                  const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
                                     RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
  NTK_NonStruct,
  NTK_NonClass,
  NTK_NonUnion,
  NTK_NonEnum,
  NTK_Typedef,
  NTK_TypeAlias,
  NTK_Template,
  NTK_TypeAliasTemplate,
  NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
                                  TagTypeKind NewTag, bool isDefinition,
                                  SourceLocation NewTagLoc,
                                  const IdentifierInfo *Name);
enum TagUseKind {
  TUK_Reference, // Reference to a tag: 'struct foo *X;'
  TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
  TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
  TUK_Friend // Friend declaration: 'friend struct foo;'
};
// Parser callback for a tag (struct/class/union/enum) reference,
// declaration, or definition; \p TUK selects which (see TagUseKind above).
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
               SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
               SourceLocation NameLoc, const ParsedAttributesView &Attr,
               AccessSpecifier AS, SourceLocation ModulePrivateLoc,
               MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
               bool &IsDependent, SourceLocation ScopedEnumKWLoc,
               bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
               bool IsTypeSpecifier, bool IsTemplateParamOrArg,
               SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
                              unsigned TagSpec, SourceLocation TagLoc,
                              CXXScopeSpec &SS, IdentifierInfo *Name,
                              SourceLocation NameLoc,
                              const ParsedAttributesView &Attr,
                              MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
                             unsigned TagSpec,
                             TagUseKind TUK,
                             const CXXScopeSpec &SS,
                             IdentifierInfo *Name,
                             SourceLocation TagLoc,
                             SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
               IdentifierInfo *ClassName,
               SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
                 Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
                       Declarator &D, Expr *BitfieldWidth,
                       InClassInitStyle InitStyle,
                       AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
                                 SourceLocation DeclStart, Declarator &D,
                                 Expr *BitfieldWidth,
                                 InClassInitStyle InitStyle,
                                 AccessSpecifier AS,
                                 const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
                          TypeSourceInfo *TInfo,
                          RecordDecl *Record, SourceLocation Loc,
                          bool Mutable, Expr *BitfieldWidth,
                          InClassInitStyle InitStyle,
                          SourceLocation TSSL,
                          AccessSpecifier AS, NamedDecl *PrevDecl,
                          Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
  /// The triviality of a method unaffected by "trivial_abi".
  TAH_IgnoreTrivialABI,
  /// The triviality of a method affected by "trivial_abi".
  TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
                            TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
                            bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
///
/// A defaulted function is either one of the C++ special member functions
/// or a defaulted comparison operator; at most one of the two fields below
/// carries a meaningful value at any time.
class DefaultedFunctionKind {
// The special member this is, or CXXInvalid if it is not one.
CXXSpecialMember SpecialMember : 8;
// The defaulted comparison this is, or DefaultedComparisonKind::None.
DefaultedComparisonKind Comparison : 8;
public:
/// Construct a kind representing "not a defaulted function".
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
/// Construct a kind for a defaulted special member function.
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
/// Construct a kind for a defaulted comparison operator.
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
/// True if this represents a special member function.
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
/// True if this represents a defaulted comparison.
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
/// True if this represents any kind of defaulted function.
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
// Exactly one of the two fields is "active" (the constructors set the
// other to its zero/invalid value), so the sum yields a distinct index:
// special members first, comparisons offset past CXXInvalid.
return SpecialMember + (unsigned)Comparison;
}
};
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
/// Return which C++ special member function \p MD is, as classified by
/// getDefaultedFunctionKind (CXXInvalid when it is not one).
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
  DefaultedFunctionKind Kind = getDefaultedFunctionKind(MD);
  return Kind.asSpecialMember();
}
/// Return which defaulted comparison operator \p FD is, as classified by
/// getDefaultedFunctionKind (DefaultedComparisonKind::None when it is not
/// a defaulted comparison).
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
  DefaultedFunctionKind Kind = getDefaultedFunctionKind(FD);
  return Kind.asComparison();
}
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Differently from C++, actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
bool IsAbstract,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Enter a template parameter scope, after it's been associated with a particular
/// DeclContext. Causes lookup within the scope to chain through enclosing contexts
/// in the correct order.
void EnterTemplatedContext(Scope *S, DeclContext *DC);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
///
/// Consumed by mergeDeclAttributes and mergeAvailabilityAttr below.
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
/// Merge availability attributes for an implementation of
/// an optional protocol requirement.
AMK_OptionalProtocolImplementation
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef UuidAsWritten, MSGuidDecl *GuidDecl);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA,
StringRef Name);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
WebAssemblyImportNameAttr *mergeImportNameAttr(
Decl *D, const WebAssemblyImportNameAttr &AL);
WebAssemblyImportModuleAttr *mergeImportModuleAttr(
Decl *D, const WebAssemblyImportModuleAttr &AL);
EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL);
EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D,
const EnforceTCBLeafAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
// NOTE(review): the enumerator order looks significant (values presumably
// feed a diagnostic %select) -- confirm before reordering or inserting.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
/// The result of checking whether a new declaration validly overloads a
/// set of existing declarations (see CheckOverload below).
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
/// Which kinds of 'explicit' functions an implicit conversion may use
/// (passed to TryImplicitConversion below).
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool IsStringInit(Expr *Init, const ArrayType *AT);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
/// Passed to the CheckConvertedConstantExpression overloads below.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_ArrayBound, ///< Array bound in array declarator or new-expression.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE,
NamedDecl *Dest = nullptr);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
///
/// Subclasses supply the type filter (match) and the diagnostics emitted
/// for each possible failure mode of the conversion.
class ContextualImplicitConverter {
public:
// If true, suppress diagnostics from this converter.
// NOTE(review): meaning inferred from the name -- confirm against
// PerformContextualImplicitConversion before relying on it.
bool Suppress;
// If true, suppress the diagnostic emitted when a conversion function is
// picked (see diagnoseConversion). NOTE(review): inferred from the name.
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
/// A ContextualImplicitConverter that accepts integral or (optionally
/// scoped) enumeration destination types, mapping the generic "no match"
/// diagnostic onto a subclass-provided "not an integer" diagnostic.
class ICEConvertDiagnoser : public ContextualImplicitConverter {
// Whether scoped enumeration types are also accepted by match().
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
// Delegate to the subclass's more specific "not an integer" diagnostic.
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
/// The kind of container an Objective-C subscripting expression refers to,
/// as determined by CheckSubscriptingKind below.
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
/// The kind of an Objective-C literal expression, as classified by
/// CheckLiteralKind below.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all template and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc,
ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
void AddOverloadedCallCandidates(
LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
/// The call was built successfully.
FRS_Success,
/// No viable function was found.
FRS_NoViableFunction,
/// A diagnostic has already been issued.
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass,
NestedNameSpecifierLoc NNSLoc,
DeclarationNameInfo DNI,
const UnresolvedSetImpl &Fns,
bool PerformADL = true);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
bool AllowRecovery = false);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
///
/// Passed to lookup entry points such as LookupSingleName and CorrectTypo
/// to select which kinds of declarations the lookup may find.
enum LookupNameKind {
  /// Ordinary name lookup, which finds ordinary names (functions,
  /// variables, typedefs, etc.) in C and most kinds of names
  /// (functions, variables, members, types, etc.) in C++.
  LookupOrdinaryName = 0,
  /// Tag name lookup, which finds the names of enums, classes,
  /// structs, and unions.
  LookupTagName,
  /// Label name lookup.
  LookupLabel,
  /// Member name lookup, which finds the names of
  /// class/struct/union members.
  LookupMemberName,
  /// Look up of an operator name (e.g., operator+) for use with
  /// operator overloading. This lookup is similar to ordinary name
  /// lookup, but will ignore any declarations that are class members.
  LookupOperatorName,
  /// Look up a name following ~ in a destructor name. This is an ordinary
  /// lookup, but prefers tags to typedefs.
  LookupDestructorName,
  /// Look up of a name that precedes the '::' scope resolution
  /// operator in C++. This lookup completely ignores operator, object,
  /// function, and enumerator names (C++ [basic.lookup.qual]p1).
  LookupNestedNameSpecifierName,
  /// Look up a namespace name within a C++ using directive or
  /// namespace alias definition, ignoring non-namespace names (C++
  /// [basic.lookup.udir]p1).
  LookupNamespaceName,
  /// Look up all declarations in a scope with the given name,
  /// including resolved using declarations. This is appropriate
  /// for checking redeclarations for a using declaration.
  LookupUsingDeclName,
  /// Look up an ordinary name that is going to be redeclared as a
  /// name with linkage. This lookup ignores any declarations that
  /// are outside of the current scope unless they have linkage. See
  /// C99 6.2.2p4-5 and C++ [basic.link]p6.
  LookupRedeclarationWithLinkage,
  /// Look up a friend of a local class. This lookup does not look
  /// outside the innermost non-class scope. See C++11 [class.friend]p11.
  LookupLocalFriendName,
  /// Look up the name of an Objective-C protocol.
  LookupObjCProtocolName,
  /// Look up implicit 'self' parameter of an objective-c method.
  LookupObjCImplicitSelfParam,
  /// Look up the name of an OpenMP user-defined reduction operation.
  LookupOMPReductionName,
  /// Look up the name of an OpenMP user-defined mapper.
  LookupOMPMapperName,
  /// Look up any declaration with any name.
  LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
///
/// forRedeclarationInCurContext() chooses between the two redeclaration
/// kinds based on the current context's owning module.
enum RedeclarationKind {
  /// The lookup is a reference to this name that is not for the
  /// purpose of redeclaring the name.
  NotForRedeclaration = 0,
  /// The lookup results will be used for redeclaration of a name,
  /// if an entity by that name already exists and is visible.
  ForVisibleRedeclaration,
  /// The lookup results will be used for redeclaration of a name
  /// with external linkage; non-visible lookup results with external linkage
  /// may also be found.
  ForExternalRedeclaration
};
/// Pick the redeclaration-lookup kind appropriate for the current context.
RedeclarationKind forRedeclarationInCurContext() {
  // A declaration with an owning module for linkage can never link against
  // anything that is not visible. We don't need to check linkage here; if
  // the context has internal linkage, redeclaration lookup won't find things
  // from other TUs, and we can't safely compute linkage yet in general.
  auto *ContextDecl = cast<Decl>(CurContext);
  if (!ContextDecl->getOwningModuleForLinkage(/*IgnoreLinkage*/ true))
    return ForExternalRedeclaration;
  return ForVisibleRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
///
/// Returned by LookupLiteralOperator to tell the caller which form of
/// user-defined literal operator (if any) was found.
enum LiteralOperatorLookupResult {
  /// The lookup resulted in an error.
  LOLR_Error,
  /// The lookup found no match but no diagnostic was issued.
  LOLR_ErrorNoDiagnostic,
  /// The lookup found a single 'cooked' literal operator, which
  /// expects a normal literal to be built and passed to it.
  LOLR_Cooked,
  /// The lookup found a single 'raw' literal operator, which expects
  /// a string literal containing the spelling of the literal token.
  LOLR_Raw,
  /// The lookup found an overload set of literal operator templates,
  /// which expect the characters of the spelling of the literal token to be
  /// passed as a non-type template argument pack.
  LOLR_Template,
  /// The lookup found an overload set of literal operator templates,
  /// which expect the character type and characters of the spelling of the
  /// string literal token to be passed as template arguments.
  LOLR_StringTemplatePack,
};
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
/// Per-TypoExpr bookkeeping: the consumer producing candidate corrections
/// plus the callbacks used to diagnose and recover from the typo.
/// Move-only: declaring the move operations suppresses the implicit copies.
struct TypoExprState {
  /// Produces the candidate corrections for this typo.
  std::unique_ptr<TypoCorrectionConsumer> Consumer;
  /// Callback invoked with the chosen TypoCorrection (see
  /// TypoDiagnosticGenerator).
  TypoDiagnosticGenerator DiagHandler;
  /// Callback invoked to rebuild an expression from a correction (see
  /// TypoRecoveryCallback).
  TypoRecoveryCallback RecoveryHandler;
  TypoExprState();
  TypoExprState(TypoExprState &&other) noexcept;
  TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, SourceLocation TypoLoc);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloaded.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult
LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys,
bool AllowRaw, bool AllowTemplate,
bool AllowStringTemplate, bool DiagnoseMissing,
StringLiteral *StringLit = nullptr);
bool isKnownName(StringRef name);
/// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs.
/// Returned by getEmissionStatus().
enum class FunctionEmissionStatus {
  Emitted,
  CUDADiscarded,     // Discarded due to CUDA/HIP hostness
  OMPDiscarded,      // Discarded due to OpenMP hostness
  TemplateDiscarded, // Discarded due to uninstantiated templates
  Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
bool Final = false);
// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
/// Specifies the situation in which typo correction is being performed;
/// passed as the Mode argument of CorrectTypo and CorrectTypoDelayed.
enum CorrectTypoKind {
  CTK_NonError,     // CorrectTypo used in a non error recovery situation.
  CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param RecoverUncorrectedTypos If true, when typo correction fails, it
/// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult CorrectDelayedTyposInExpr(
Expr *E, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
/// Convenience overload taking an ExprResult: an invalid result is returned
/// unchanged; otherwise the contained expression is handed to the Expr*
/// overload with the same arguments.
ExprResult CorrectDelayedTyposInExpr(
    ExprResult ER, VarDecl *InitDecl = nullptr,
    bool RecoverUncorrectedTypos = false,
    llvm::function_ref<ExprResult(Expr *)> Filter =
        [](Expr *E) -> ExprResult { return E; }) {
  if (ER.isInvalid())
    return ER;
  return CorrectDelayedTyposInExpr(ER.get(), InitDecl, RecoverUncorrectedTypos,
                                   Filter);
}
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
ArrayRef<Expr *> SubExprs,
QualType T = QualType());
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID,
SourceLocation Loc);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Handles semantic checking for features that are common to all attributes,
/// such as checking whether a parameter was properly specified, or the
/// correct number of arguments were passed, etc. Returns true if the
/// attribute has been diagnosed.
bool checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A);
bool checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
llvm::Error isValidSectionSpecifier(StringRef Str);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Process the attributes before creating an attributed statement. Returns
/// the semantic attributes that have been processed.
void ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesWithRange &InAttrs,
SmallVectorImpl<const Attr *> &OutAttrs);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar
/// that backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
/// How strictly two Objective-C method declarations must agree when compared
/// by MatchTwoMethodDeclarations (which defaults to MMS_strict).
enum MethodMatchStrategy {
  MMS_loose,
  MMS_strict
};
/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in global method pool for
/// given selector. It checks the desired kind first, if none is found, and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, function returns false; otherwise, it
/// returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns a selector which best matches given argument list or
/// nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure (unless \p RecordFailure is false) and
/// return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (!RecordFailure)
    return TypoCorrection();
  // Remember where this identifier failed to correct.
  TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  // Thin wrapper over the private AddMethodToGlobalPool.
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  // Thin wrapper over the private AddMethodToGlobalPool.
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass=false) {
  // Thin wrapper over the private LookupMethodInGlobalPool.
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass=false) {
  // Thin wrapper over the private LookupMethodInGlobalPool.
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
/// Wrapper holding an expression that has gone through ActOnFinishFullExpr
/// (see MakeFullExpr and friends). Constructible from a raw Expr* only by
/// Sema, via the private constructor below.
class FullExprArg {
public:
  FullExprArg() : E(nullptr) { }
  FullExprArg(Sema &actions) : E(nullptr) { }
  /// Hand the wrapped expression back as an ExprResult.
  ExprResult release() {
    return E;
  }
  /// The wrapped expression; may be null.
  Expr *get() const { return E; }
  Expr *operator->() {
    return E;
  }
private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;
  explicit FullExprArg(Expr *expr) : E(expr) {}
  // The wrapped expression; null for a default-constructed FullExprArg.
  Expr *E;
};
/// Build a FullExprArg from \p Arg, using the expression's own location
/// when available (a null expression gets an invalid location).
FullExprArg MakeFullExpr(Expr *Arg) {
  SourceLocation Loc;
  if (Arg)
    Loc = Arg->getExprLoc();
  return MakeFullExpr(Arg, Loc);
}
/// Build a FullExprArg by finishing \p Arg as a non-discarded full
/// expression at location \p CC.
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  ExprResult Full =
      ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false);
  return FullExprArg(Full.get());
}
/// Like MakeFullExpr, but finishes \p Arg as a discarded-value full
/// expression.
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  SourceLocation Loc = Arg ? Arg->getExprLoc() : SourceLocation();
  return FullExprArg(
      ActOnFinishFullExpr(Arg, Loc, /*DiscardedValue*/ true).get());
}
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnAfterCompoundStatementLeadingPragmas();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
/// Calls ActOnStartOfCompoundStmt on construction and
/// ActOnFinishOfCompoundStmt on destruction.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }
  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }
private:
  Sema &S;
};
/// An RAII helper that pops a function scope on exit, unless disabled.
struct FunctionScopeRAII {
  Sema &S;
  // Whether the destructor should still pop the scope; cleared by disable().
  bool Active;
  FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }
  /// Keep the function scope alive past this object's lifetime.
  void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult BuildAttributedStmt(SourceLocation AttrsLoc,
ArrayRef<const Attr *> Attrs, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(const ParsedAttributesWithRange &AttrList,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond,
SourceLocation RParenLoc);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
/// Selects how much work ActOnCXXForRangeStmt / BuildCXXForRangeStmt may do
/// when building a C++ for-range statement.
enum BuildForRangeKind {
  /// Initial building of a for-range statement.
  BFRK_Build,
  /// Instantiation or recovery rebuild of a for-range statement. Don't
  /// attempt any typo-correction.
  BFRK_Rebuild,
  /// Determining whether a for-range statement could be built. Avoid any
  /// unnecessary or irreversible actions.
  BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
/// Result of getNamedReturnInfo: the candidate variable for a named return,
/// and whether it is eligible for move and/or copy elision.
struct NamedReturnInfo {
  /// The candidate variable, if any (see getCopyElisionCandidate).
  const VarDecl *Candidate;
  enum Status : uint8_t { None, MoveEligible, MoveEligibleAndCopyElidable };
  Status S;
  /// True when the candidate may be treated as move-eligible.
  // Fix: dropped the stray ';' after the body below (redundant empty
  // declaration, flagged by -Wextra-semi).
  bool isMoveEligible() const { return S != None; }
  /// True when, additionally, the copy may be elided entirely.
  bool isCopyElidable() const { return S == MoveEligibleAndCopyElidable; }
};
NamedReturnInfo getNamedReturnInfo(Expr *&E, bool ForceCXX2b = false);
NamedReturnInfo getNamedReturnInfo(const VarDecl *VD,
bool ForceCXX20 = false);
const VarDecl *getCopyElisionCandidate(NamedReturnInfo &Info,
QualType ReturnType);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const NamedReturnInfo &NRInfo,
Expr *Value);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
NamedReturnInfo &NRInfo);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// If VD is set but not otherwise used, diagnose, for a parameter or a
/// variable.
void DiagnoseUnusedButSetDecl(const VarDecl *VD);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
/// Enter a "parsing declaration" context in which diagnostics are delayed
/// into \p pool; must be balanced by a later PopParsingDeclaration with the
/// returned state.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
/// Enter a "parsing class" context: increments ParsingClassDepth and pushes
/// an undelayed diagnostic state; must be balanced by PopParsingClass with
/// the returned state.
ParsingClassState PushParsingClass() {
ParsingClassDepth++;
return DelayedDiagnostics.pushUndelayed();
}
/// Leave a "parsing class" context entered by PushParsingClass, restoring
/// the saved delayed-diagnostic \p state.
void PopParsingClass(ParsingClassState state) {
ParsingClassDepth--;
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReciever = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
/// How a variable capture was requested: implicitly (block or lambda), or
/// via an explicit by-value / by-reference lambda capture. Used by
/// tryCaptureVariable.
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Try to convert an expression \p E to type \p Ty. Returns the result of the
/// conversion.
ExprResult tryConvertExprToType(Expr *E, QualType Ty);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool DiagnoseDependentMemberLookup(LookupResult &R);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(
const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
UnresolvedLookupExpr *AsULE = nullptr);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen,
TypeSourceInfo *TSI);
ExprResult ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen,
ParsedType ParsedTy);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
Expr *ColumnIdx,
SourceLocation RBLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound,
SourceLocation ColonLocFirst,
SourceLocation ColonLocSecond,
Expr *Length, Expr *Stride,
SourceLocation RBLoc);
ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SourceLocation RParenLoc,
ArrayRef<Expr *> Dims,
ArrayRef<SourceRange> Brackets);
/// Data structure for an OpenMP iterator expression as parsed, before
/// ActOnOMPIteratorExpr builds the OMPIteratorExpr.
struct OMPIteratorData {
IdentifierInfo *DeclIdent = nullptr; // Name of the declared iterator variable.
SourceLocation DeclIdentLoc; // Location of that name.
ParsedType Type; // Parsed type of the iterator variable.
OMPIteratorExpr::IteratorRange Range; // Range expressions (presumably begin:end[:step] — see OMPIteratorExpr).
SourceLocation AssignLoc; // Location of '='.
SourceLocation ColonLoc; // Location of the first ':'.
SourceLocation SecColonLoc; // Location of the second ':', if present.
};
ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
SourceLocation LLoc, SourceLocation RLoc,
ArrayRef<OMPIteratorData> Data);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S; // Scope in which the original member access was parsed.
UnqualifiedId &Id; // The member name being accessed.
Decl *ObjCImpDecl; // NOTE(review): presumably the enclosing ObjC implementation decl, if any — confirm against ActOnMemberAccessExpr.
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false,
bool AllowRecovery = false);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
UnresolvedSetImpl &Functions);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expression extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
/// One component of a __builtin_offsetof designator: either a member
/// access (.ident) or an array subscript ([expr]).
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd; // Source range of this component.
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo; // Valid when !isBrackets.
Expr *E; // Valid when isBrackets.
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
/// ActOnBlockStart - This callback is invoked when a block literal is started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
/// Semantic counterpart of ActOnAsTypeExpr, taking an already-resolved
/// destination type instead of a ParsedType.
ExprResult BuildAsTypeExpr(Expr *E, QualType DestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
/// Called once the closing brace of a namespace definition has been parsed.
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
/// Retrieve the cached 'std' namespace declaration, if any.
NamespaceDecl *getStdNamespace() const;
/// Retrieve the 'std' namespace, creating one on demand if needed.
NamespaceDecl *getOrCreateStdNamespace();
/// Look up the 'std::experimental' namespace.
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
/// Look up the member of \p ClassDecl designated by a ctor-initializer's
/// member-or-base name. NOTE(review): presumably used while processing
/// mem-initializers — confirm against callers.
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// Describes the context that required a comparison category type
/// (see CheckComparisonCategoryType below).
enum class ComparisonCategoryUsage {
/// The '<=>' operator was used in an expression and a builtin operator
/// was selected.
OperatorInExpression,
/// A defaulted 'operator<=>' needed the comparison category. This
/// typically only applies to 'std::strong_ordering', due to the implicit
/// fallback return value.
DefaultedOperator,
};
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
/// Parsed a using-directive ('using namespace ...').
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
/// Register an already-built using-directive with the given scope.
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
/// Parsed a namespace alias definition ('namespace A = B;').
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void FilterUsingLookup(Scope *S, LookupResult &lookup);
/// Hide a UsingShadowDecl from name lookup in the given scope.
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
/// Check whether \p Target can be brought in by \p BUD without conflicting
/// with \p PreviousDecls; on success \p PrevShadow receives any prior shadow
/// declaration for the same target.
bool CheckUsingShadowDecl(BaseUsingDecl *BUD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
/// Build the shadow declaration that a using-declaration introduces for
/// \p Target.
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
/// Diagnose an invalid redeclaration of a using-declaration.
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
/// Check the nested-name-specifier of a using-declaration for validity.
bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc,
const LookupResult *R = nullptr,
const UsingDecl *UD = nullptr);
/// Build a using-declaration (or using-pack, when EllipsisLoc is valid).
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation,
bool IsUsingIfExists);
/// Build a C++20 using-enum-declaration for the given enumeration.
NamedDecl *BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation EnumLoc,
SourceLocation NameLoc, EnumDecl *ED);
/// Build the pack of declarations expanded from a pack-expansion
/// using-declaration.
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
/// Check a using-declaration that names an inheriting constructor.
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
/// Parsed a using-declaration ('using A::b;').
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
/// Parsed a using-enum-declaration ('using enum E;').
Decl *ActOnUsingEnumDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation EnumLoc, const DeclSpec &);
/// Parsed an alias-declaration ('using A = T;'), possibly templated.
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
/// Build an expression for the default-initialization of the given field.
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that incrementally computes the exception specification an
/// implicitly-declared special member function should receive, based on the
/// set of functions it would call.
class ImplicitExceptionSpecification {
// Stored as a pointer (not a reference) so instances remain copyable.
Sema *Self;
// Exception specifications are ordered from most to least restrictive:
// noexcept (C++11 only), then throw(), then throw(<collected exceptions>),
// and finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
// Discard every exception type collected so far.
void ClearExceptions() {
Exceptions.clear();
ExceptionsSeen.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self),
ComputedEST(Self.getLangOpts().CPlusPlus11 ? EST_BasicNoexcept
: EST_DynamicNone) {}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E) { CalledStmt(E); }
/// Integrate an invoked statement into the collected data.
void CalledStmt(Stmt *S);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_None) {
// C++11 [except.spec]p14: the exception-specification is
// noexcept(false) if the set of potential exceptions of the special
// member function contains "any".
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr =
Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get();
} else if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
}
return ESI;
}
};
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
/// Convert and check the argument list for a call to the given constructor,
/// appending the converted arguments to \p ConvertedArgs.
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
QualType DeclInitType, MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr *> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse
/// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
/// Semantic counterpart of ActOnCXXNamedCast, taking a resolved target type.
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
/// Parsed a __builtin_bit_cast expression.
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
/// Build a typeid expression for a type operand.
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
/// Build a typeid expression for an expression operand.
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Build a __uuidof expression for a type operand.
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
/// Build a __uuidof expression for an expression operand.
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee,
SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
/// Build the value of a fold-expression over an empty pack.
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
// Saved value of Sema::CXXThisTypeOverride, restored on destruction.
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// Parsed an Objective-C @available(...) check expression.
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
/// Check that the given expression may legally be thrown.
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
/// Check that \p AllocType is a type that may legally be allocated by 'new'.
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
/// Declare the implicit global operator new/delete overloads.
void DeclareGlobalNewDelete();
/// Declare one implicit global allocation function with the given signature.
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
/// Find the deallocation function matching \p Name in class \p RD.
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
/// Diagnose a call that may invoke a destructor virtually (or fail to).
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
/// Parsed a noexcept(expr) operator expression.
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
/// Parsed the start of a '.' or '->' member access expression.
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
/// Build a pseudo-destructor expression (e.g. 'p->~int()').
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
/// Convenience overload that finishes a full-expression using the
/// expression's own location (or an invalid location when \p Expr is null).
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
SourceLocation CC = Expr ? Expr->getExprLoc() : SourceLocation();
return ActOnFinishFullExpr(Expr, CC, DiscardedValue);
}
/// Finish a full-expression at the given location, performing any required
/// cleanups and checks.
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
// Complete an enum decl, maybe without a scope spec.
bool RequireCompleteEnumDecl(EnumDecl *D, SourceLocation L,
CXXScopeSpec *SS = nullptr);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
/// Creates info object from an already-resolved object type.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
/// Determine whether the identifier in \p IdInfo names a non-type entity
/// rather than a type or namespace.
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
/// Worker for ActOnCXXNestedNameSpecifier; see the documentation there.
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
/// The parser has parsed a nested-name-specifier that begins with
/// 'decltype(expr)::'.
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
  // Every init-capture kind other than copy-initialization ('x = init') is
  // treated as direct-initialization ('x{init}' / 'x(init)').
  const bool DirectInit = InitKind != LambdaCaptureInitKind::CopyInit;
  // Delegate to the QualType-returning worker; no pack expansion count is
  // known at this point, hence None.
  QualType CaptureType = buildLambdaInitCaptureInitialization(
      Loc, ByRef, EllipsisLoc, /*NumExpansions=*/None, Id, DirectInit, Init);
  return ParsedType::make(CaptureType);
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc,
ExprResult RequiresClause);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType,
CallingConv CC);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives the result: true if D1 is
/// at least as constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// If D1 was not at least as constrained as D2, but would've been if a pair
/// of atomic constraints involved had been declared in a concept and not
/// repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful, emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful, emits a diagnostic and returns true if
/// an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(
SourceLocation Location, CXXRecordDecl *ClassDecl,
llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);
/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
CXXDestructorDecl *Dtor);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator. \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Decl *Template,
llvm::function_ref<Scope *()> EnterScope);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
/// Possible outcomes of a C++ access-control check.
enum AccessResult {
AR_accessible,    ///< The access is permitted.
AR_inaccessible,  ///< The access is not permitted.
AR_dependent,     ///< The result depends on context not yet known
                  ///< (e.g. template-dependent) — resolved later.
AR_delayed        ///< The check has been deferred for later processing.
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
/// Convenience overload: performs the same accessibility check but with an
/// invalid source location and a default-constructed diagnostic, i.e. a
/// query-only check with no meaningful diagnostic attached.
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found,
QualType ObjectType) {
return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
/// Selects the wording of a diagnostic about an abstract class being used
/// where a concrete type is required (the %select index in the diagnostic).
enum AbstractDiagSelID {
AbstractNone = -1,          ///< No specific use-site kind.
AbstractReturnType,         ///< Used as a function return type.
AbstractParamType,          ///< Used as a function parameter type.
AbstractVariableType,       ///< Used as a variable's type.
AbstractFieldType,          ///< Used as a data-member type.
AbstractIvarType,           ///< Used as an Objective-C ivar type.
AbstractSynthesizedIvarType,///< Used as a synthesized ivar type.
AbstractArrayType           ///< Used as an array element type.
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
/// Diagnose (with diagnostic ID \p DiagID and arguments \p Args) if \p T is
/// an abstract class type. Returns true if a diagnostic was emitted.
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const Ts &...Args) {
  // Package the diagnostic ID and its arguments into a diagnoser object and
  // forward to the TypeDiagnoser-based overload.
  BoundTypeDiagnoser<Ts...> TypeDiag(DiagID, Args...);
  return RequireNonAbstractType(Loc, T, TypeDiag);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
static NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
/// Tag type used to construct a RequiredTemplateKind that unconditionally
/// requires a template name.
enum TemplateNameIsRequiredTag { TemplateNameIsRequired };
/// Whether and why a template name is required in this lookup.
class RequiredTemplateKind {
public:
/// Template name is required if TemplateKWLoc is valid.
RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
: TemplateKW(TemplateKWLoc) {}
/// Template name is unconditionally required.
RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}
/// The location of the 'template' keyword, or an invalid location when
/// there was none (including the unconditionally-required state).
SourceLocation getTemplateKeywordLoc() const {
return TemplateKW.getValueOr(SourceLocation());
}
bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
/// True when a template name is required: TemplateKW is None (the
/// unconditionally-required state) or holds a valid keyword location.
/// An engaged-but-invalid location compares equal to SourceLocation()
/// under llvm::Optional's mixed comparison, so it is not required.
bool isRequired() const { return TemplateKW != SourceLocation(); }
explicit operator bool() const { return isRequired(); }
private:
// None <=> unconditionally required; engaged <=> required iff valid.
llvm::Optional<SourceLocation> TemplateKW;
};
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(
LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
bool EnteringContext, bool &MemberOfUnknownSpecialization,
RequiredTemplateKind RequiredTemplate = SourceLocation(),
AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization,
bool Disambiguation = false);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool BuildTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc,
bool AllowUnexpandedPack);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
NonTypeTemplateParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool RequireStructuralType(QualType T, SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,                    // class template declaration
TPC_VarTemplate,                      // variable template declaration
TPC_FunctionTemplate,                 // function template declaration
TPC_ClassTemplateMember,              // member of a class template
TPC_FriendClassTemplate,              // friend class template
TPC_FriendFunctionTemplate,           // friend function template declaration
TPC_FriendFunctionTemplateDefinition, // friend function template definition
TPC_TypeAliasTemplate                 // alias template declaration
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
bool SuppressDiagnostic = false);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
SourceLocation Location);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
/// Get the specialization of the given variable template corresponding to
/// the specified argument list, or a null-but-valid result if the arguments
/// are dependent.
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
/// Form a reference to the specialization of the given variable template
/// corresponding to the specified argument list, or a null-but-valid result
/// if the arguments are dependent.
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
TypeSourceInfo **TSI,
bool DeducedTSTContext);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
bool DeducedTSTContext = true);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Concepts
//===--------------------------------------------------------------------===//
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
Scope *BodyScope);
void ActOnFinishRequiresExpr();
concepts::Requirement *ActOnSimpleRequirement(Expr *E);
concepts::Requirement *ActOnTypeRequirement(
SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
concepts::Requirement *ActOnCompoundRequirement(Expr *E,
SourceLocation NoexceptLoc);
concepts::Requirement *
ActOnCompoundRequirement(
Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint, unsigned Depth);
concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
concepts::ExprRequirement *
BuildExprRequirement(
Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::ExprRequirement *
BuildExprRequirement(
concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
concepts::TypeRequirement *
BuildTypeRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
concepts::NestedRequirement *
BuildNestedRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint,
/// A requirement in a requires-expression.
UPPC_Requirement,
/// A requires-clause.
UPPC_RequiresClause,
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given requires-expression contains an unexpanded reference to one
/// of its own parameter packs, diagnose the error.
///
/// \param RE The requires-expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param TemplateArgs The multi-level list of template arguments
/// available for substitution into the pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
/// Perform template argument deduction for a class template partial
/// specialization against the given list of template arguments.
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
/// Perform template argument deduction for a variable template partial
/// specialization against the given list of template arguments.
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
/// Substitute the explicitly-specified template arguments of a function
/// template, producing the initial set of deduced arguments and the
/// (possibly adjusted) parameter types used for the rest of deduction.
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType; // Parameter type deduction was performed from.
bool DecomposedParam; // NOTE(review): presumably "parameter was decomposed
// from an initializer list" -- confirm at use sites.
unsigned ArgIdx; // Index of the call argument.
QualType OriginalArgType; // Type of the original call argument.
};
/// Finish template argument deduction for a function template: check the
/// deduced arguments and, on success, form the function specialization.
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
/// Perform template argument deduction for a function template invoked
/// with the given call arguments.
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
/// Perform template argument deduction against a target function type
/// (e.g., when taking the address of a function template).
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Perform template argument deduction for a conversion function template
/// against the destination type of the conversion.
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
/// Perform template argument deduction when there is nothing to deduce
/// against: only the explicitly-specified template arguments are used.
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
/// Deduce the type declared with \c auto from the given initializer,
/// storing the result in \p Result.
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
/// As above, but taking the \c auto type as a TypeLoc.
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
/// Diagnose a failed \c auto type deduction for the given variable.
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
/// Deduce the (auto) return type of the given function, diagnosing on
/// failure when \p Diagnose is set; returns true on error.
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
/// Deduce the template specialization (class template argument deduction)
/// named by \p TInfo from the given initializer.
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
/// Deduce the type of a variable from its initializer.
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
/// Retrieve the TypeLoc of the given function's return type.
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
/// Deduce the deduced (auto) return type of \p FD from a return
/// expression; returns true on error.
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
/// Determine which of two function templates is more specialized
/// under template partial ordering.
FunctionTemplateDecl *getMoreSpecializedTemplate(
FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
unsigned NumCallArguments2, bool Reversed = false);
/// Find the most specialized declaration in the range [SBegin, SEnd),
/// diagnosing with the given diagnostics when \p Complain is set.
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
/// Determine which of two class template partial specializations is
/// more specialized.
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
/// Determine whether the given class template partial specialization is
/// more specialized than the primary template.
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
/// Determine which of two variable template partial specializations is
/// more specialized.
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
/// Determine whether the given variable template partial specialization
/// is more specialized than the primary template.
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);
/// Mark which template parameters (at the given depth) are used within
/// the expression \p E.
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth, llvm::SmallBitVector &Used);
/// Mark which template parameters (at the given depth) are used within
/// the given template argument list.
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
/// Mark which template parameters can be deduced from a given function
/// template; convenience wrapper over the static overload below.
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
/// Retrieve the template argument lists that apply when instantiating the
/// declaration \p D, optionally with the given innermost argument list.
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing.
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template argument determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are instantiating a requirement of a requires expression.
RequirementInstantiation,
/// We are checking the satisfaction of a nested requirement of a requires
/// expression.
NestedRequirementConstraintsCheck,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
// We are checking the constraints associated with a constrained entity or
// the constraint expression of a concept. This includes the checks that
// atomic constraints have the type 'bool' and that they can be constant
// evaluated.
ConstraintsCheck,
// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
// We are normalizing a constraint expression.
ConstraintNormalization,
// We are substituting into the parameter mapping of an atomic constraint
// during normalization.
ParameterMappingSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// We are initializing a structured binding.
InitializingStructuredBinding,
/// We are marking a class as __dllexport.
MarkingClassDllexported,
/// Added for Template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// This callbacks is used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
// Index saved at construction and restored at destruction.
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
// NOTE(review): the friend declaration names 'ArgumentPackSubstitutionRAII',
// which does not match the class above ('ArgumentPackSubstitutionIndexRAII');
// confirm which class name is intended.
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintsCheck {};
/// \brief Note that we are checking the constraints associated with some
/// constrained entity (a concept declaration or a template with associated
/// constraints).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, NamedDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintSubstitution {};
/// \brief Note that we are checking a constraint expression associated
/// with a template declaration or as part of the satisfaction check of a
/// concept.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintSubstitution, NamedDecl *Template,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
struct ConstraintNormalization {};
/// \brief Note that we are normalizing a constraint expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintNormalization, NamedDecl *Template,
SourceRange InstantiationRange);
struct ParameterMappingSubstitution {};
/// \brief Note that we are substituting into the parameter mapping of an
/// atomic constraint during constraint normalization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParameterMappingSubstitution, NamedDecl *Template,
SourceRange InstantiationRange);
/// \brief Note that we are substituting template arguments into a part of
/// a requirement of a requires expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::Requirement *Req,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are checking the satisfaction of the constraint
/// expression inside of a nested requirement.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::NestedRequirement *Req, ConstraintsCheck,
SourceRange InstantiationRange = SourceRange());
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
// Set when the context could not be pushed (e.g., the instantiation
// depth limit was exceeded); see CheckInstantiationDepth.
bool Invalid;
// Whether this specialization is already being instantiated in an
// enclosing active instantiation.
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
// Common implementation shared by the public constructors.
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
/// Push a new code synthesis context onto \c CodeSynthesisContexts.
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
/// Pop the innermost code synthesis context.
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
/// Print the current instantiation stack if it has changed since the last
/// diagnostic, then any pragma-attribute instantiation point.
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
/// Print notes describing the current stack of code synthesis contexts.
void PrintInstantiationStack();
/// Print a note describing the instantiation point of the current
/// '#pragma clang attribute' target declaration.
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
// State saved at construction and restored in the destructor.
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
// Saved typo-correction state, restored in the destructor.
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
/// Cache used by the thread-safety analysis
/// (see \c threadSafety::BeforeSet).
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation),
/// however, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
/// RAII scope that isolates eager global template instantiation.
///
/// When \p Enabled, the constructor stashes the pending-instantiation queue
/// and the pending vtable uses so the scope starts empty; perform() flushes
/// whatever accumulated inside the scope; the destructor restores the
/// stashed state.
class GlobalEagerInstantiationScope {
public:
  GlobalEagerInstantiationScope(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (!Enabled) return;

    // Move the current queues aside; they are restored by the destructor.
    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }

  // Define vtables and perform the instantiations queued while this
  // scope was active (no-op when the scope is disabled).
  void perform() {
    if (Enabled) {
      S.DefineUsedVTables();
      S.PerformPendingInstantiations();
    }
  }

  ~GlobalEagerInstantiationScope() {
    if (!Enabled) return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
      assert(S.PendingInstantiations.empty() &&
             "PendingInstantiations should be empty before it is discarded.");
      S.PendingInstantiations.swap(SavedPendingInstantiations);
    } else {
      // Template instantiations in the PCH may be delayed until the TU.
      // Put the saved queue first, then append what is still pending.
      S.PendingInstantiations.swap(SavedPendingInstantiations);
      S.PendingInstantiations.insert(S.PendingInstantiations.end(),
                                     SavedPendingInstantiations.begin(),
                                     SavedPendingInstantiations.end());
    }
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
/// RAII scope that isolates eager *local* template instantiation
/// (PendingLocalImplicitInstantiations).
class LocalEagerInstantiationScope {
public:
  LocalEagerInstantiationScope(Sema &S) : S(S) {
    // Start the scope with an empty local queue; the previous queue is
    // restored on destruction.
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

  // Instantiate everything queued locally within this scope.
  void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

  ~LocalEagerInstantiationScope() {
    // The local queue must have been drained before the scope ends.
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

private:
  Sema &S;
  std::deque<PendingImplicitInstantiation>
      SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos incrementally.
class ExtParameterInfoBuilder {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  bool HasInteresting = false;

public:
  /// Set the ExtParameterInfo for the parameter at the given index.
  /// Indices must be supplied in strictly increasing order; any skipped
  /// slots are filled with default-constructed entries.
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    assert(Infos.size() <= index);
    // Default-fill up to (and including) this slot, then store the entry.
    Infos.resize(index + 1);
    Infos[index] = info;
    // Remember whether any entry differs from the default.
    HasInteresting |= (info != FunctionProtoType::ExtParameterInfo());
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// array built so far, padded out to \p numParams entries — or null if
  /// every entry is just the default.
  const FunctionProtoType::ExtParameterInfo *
  getPointerOrNull(unsigned numParams) {
    if (HasInteresting) {
      Infos.resize(numParams);
      return Infos.data();
    }
    return nullptr;
  }
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
/// An attribute whose instantiation is deferred (see LateAttrs parameters
/// on InstantiateAttrs / BuildVariableInstantiation below).
struct LateInstantiatedAttribute {
  const Attr *TmplAttr;           // The not-yet-instantiated attribute.
  LocalInstantiationScope *Scope; // Scope to instantiate it within.
  Decl *NewDecl;                  // Declaration the result attaches to.

  LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
                            Decl *D)
      : TmplAttr(A), Scope(S), NewDecl(D)
  { }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
SourceLocation PointOfInstantiation, FunctionDecl *Decl,
ArrayRef<TemplateArgument> TemplateArgs,
ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
/// Kinds of Objective-C container declarations; OCK_None indicates no
/// container (see getObjCContainerKind() below).
enum ObjCContainerKind {
  OCK_None = -1,
  OCK_Interface = 0,
  OCK_Protocol,
  OCK_Category,
  OCK_ClassExtension,
  OCK_Implementation,
  OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build a an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
/// Classifies an Objective-C method into one of the special method
/// families named by the enumerators (alloc/new/copy/init variants),
/// or OSMK_None.
enum ObjCSpecialMethodKind {
  OSMK_None,
  OSMK_Alloc,
  OSMK_New,
  OSMK_Copy,
  OSMK_RetainingInit,
  OSMK_NonRetainingInit
};
/// Parsed information about one argument of an Objective-C method
/// declaration (consumed by ActOnMethodDeclaration below).
struct ObjCArgInfo {
  IdentifierInfo *Name;   // The argument's name.
  SourceLocation NameLoc; // Location of the argument's name.
  // The Type is null if no type was specified, and the DeclSpec is invalid
  // in this case.
  ParsedType Type;
  ObjCDeclSpec DeclSpec;
  /// ArgAttrs - Attribute list for this argument.
  ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaAlignPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaAlignPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
/// Which MS section pragma a UnifySection/ActOnPragmaMSSeg call refers to
/// (\#pragma bss_seg/data_seg/const_seg/code_seg).
enum PragmaSectionKind {
  PSK_DataSeg,  // #pragma data_seg
  PSK_BSSSeg,   // #pragma bss_seg
  PSK_ConstSeg, // #pragma const_seg
  PSK_CodeSeg,  // #pragma code_seg
};
bool UnifySection(StringRef SectionName, int SectionFlags,
NamedDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// Are precise floating point semantics currently enabled?
///
/// True only when none of the value-changing fast-math relaxations
/// (reassociation, ignoring signed zeros, reciprocal substitution,
/// approximate library functions) is allowed.
bool isPreciseFPEnabled() {
  return !(CurFPFeatures.getAllowFPReassociate() ||
           CurFPFeatures.getNoSignedZero() ||
           CurFPFeatures.getAllowReciprocal() ||
           CurFPFeatures.getAllowApproxFunc());
}
/// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
PragmaFloatControlKind Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC);
/// Called on well formed
/// \#pragma clang fp reassociate
void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled);
/// Called on well formed '\#pragma clang fp' that has option 'exceptions'.
void ActOnPragmaFPExceptions(SourceLocation Loc,
LangOptions::FPExceptionModeKind);
/// Called to set constant rounding mode for floating point operations.
void setRoundingMode(SourceLocation Loc, llvm::RoundingMode);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// a the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the source location of the currently active "\#pragma clang optimize
/// off". If this location is invalid, no such pragma is active and the state
/// of the pragma is "on", i.e. optimizations are enabled.
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D.
void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Annot, MutableArrayRef<Expr *> Args);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
/// Called when a coroutine body is entered; \p Keyword is the coroutine
/// keyword ('co_await', 'co_yield' or 'co_return') that triggered it.
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
/// Called on a parsed 'co_await' expression with operand \p E.
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
/// Called on a parsed 'co_yield' expression with operand \p E.
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
/// Called on a parsed 'co_return' statement with operand \p E.
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
/// Build a 'co_await' expression once its operand has been resolved;
/// \p IsImplicit marks compiler-generated awaits.
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
/// Build a 'co_await' expression whose resolution is still pending,
/// using \p Lookup for the unresolved name.
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
/// Build a 'co_yield' expression for operand \p E.
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
/// Build a 'co_return' statement; \p IsImplicit marks compiler-generated
/// returns.
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
/// Build the CoroutineBodyStmt that wraps the coroutine's body.
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
/// Build the parameter move statements for the current coroutine.
bool buildCoroutineParameterMoves(SourceLocation Loc);
/// Build the promise variable for the current coroutine.
VarDecl *buildCoroutinePromise(SourceLocation Loc);
/// Perform final checks on the completed coroutine body \p Body of \p FD.
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
/// Look up the coroutine_traits class template for the current coroutine.
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Bookkeeping for one (possibly nested) '#pragma omp [begin] declare
/// target' context.
struct DeclareTargetContextInfo {
struct MapInfo {
/// Map type ('to' or 'link') the declaration was listed with.
OMPDeclareTargetDeclAttr::MapTypeTy MT;
/// Location where the declaration was named in the clause.
SourceLocation Loc;
};
/// Explicitly listed variables and functions in a 'to' or 'link' clause.
llvm::DenseMap<NamedDecl *, MapInfo> ExplicitlyMapped;
/// The 'device_type' as parsed from the clause.
OMPDeclareTargetDeclAttr::DevTypeTy DT = OMPDeclareTargetDeclAttr::DT_Any;
/// The directive kind, `begin declare target` or `declare target`.
OpenMPDirectiveKind Kind;
/// The directive location.
SourceLocation Loc;
DeclareTargetContextInfo(OpenMPDirectiveKind Kind, SourceLocation Loc)
: Kind(Kind), Loc(Loc) {}
};
/// Stack of nested '#pragma omp declare target' directive contexts.
SmallVector<DeclareTargetContextInfo, 4> DeclareTargetNesting;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true,
bool SuppressExprDiags = false);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Analyzes and checks a loop nest for use by a loop transformation.
///
/// \param Kind The loop transformation directive kind.
/// \param NumLoops How many nested loops the directive is expecting.
/// \param AStmt Associated statement of the transformation directive.
/// \param LoopHelpers [out] The loop analysis result.
/// \param Body [out] The body code nested in \p NumLoops loop.
/// \param OriginalInits [out] Collection of statements and declarations that
/// must have been executed/declared before entering the
/// loop.
///
/// \return Whether there was any error.
bool checkTransformableLoopNest(
OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops,
SmallVectorImpl<OMPLoopBasedDirective::HelperExprs> &LoopHelpers,
Stmt *&Body,
SmallVectorImpl<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>>
&OriginalInits);
/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
struct OMPDeclareVariantScope {
/// The associated OpenMP context selector.
OMPTraitInfo *TI;
/// The associated OpenMP context selector mangling.
std::string NameSuffix;
/// Construct a scope for the context selector \p TI (defined out of line).
OMPDeclareVariantScope(OMPTraitInfo &TI);
};
/// Return the OMPTraitInfo of the innermost `omp begin/end declare variant`
/// scope, or nullptr when no such scope is active.
OMPTraitInfo *getOMPTraitInfoForSurroundingScope() {
if (OMPDeclareVariantScopes.empty())
return nullptr;
return OMPDeclareVariantScopes.back().TI;
}
/// The current `omp begin/end declare variant` scopes.
SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;
/// The current `omp begin/end assumes` scopes.
SmallVector<AssumptionAttr *, 4> OMPAssumeScoped;
/// All `omp assumes` we encountered so far.
SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal;
public:
/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
/// declaration for \p D and rename \p D according to the OpenMP context
/// selector of the surrounding scope. Return all base functions in \p Bases.
void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists,
SmallVectorImpl<FunctionDecl *> &Bases);
/// Register \p D as specialization of all base functions in \p Bases in the
/// current `omp begin/end declare variant` scope.
void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
Decl *D, SmallVectorImpl<FunctionDecl *> &Bases);
/// Act on \p D, a function definition inside of an `omp [begin/end] assumes`.
void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D);
/// Can we exit an OpenMP declare variant scope at the moment, i.e. is at
/// least one `omp begin declare variant` scope currently open?
bool isInOpenMPDeclareVariantScope() const {
return !OMPDeclareVariantScopes.empty();
}
/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
/// original \p Call.
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
SourceLocation LParenLoc, MultiExprArg ArgExprs,
SourceLocation RParenLoc, Expr *ExecConfig);
/// Handle a `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);
/// Handle a `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Function tries to capture lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a VD should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of nested OpenMP construct for that
/// the check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp [begin] assume[s]'.
void ActOnOpenMPAssumesDirective(SourceLocation Loc,
OpenMPDirectiveKind DKind,
ArrayRef<StringRef> Assumptions,
bool SkippedClauses);
/// Check if there is an active scoped `omp begin assumes` directive
/// (tracked in OMPAssumeScoped).
bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); }
/// Check if there is an active global `omp assumes` directive
/// (tracked in OMPAssumeGlobal).
bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); }
/// Called on well-formed '#pragma omp end assumes'.
void ActOnOpenMPEndAssumesDirective();
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on Requires directive
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S,
QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const;
const ValueDecl *getOpenMPDeclareMapperVarName() const;
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI);
/// Called at the end of target region i.e. '#pragma omp end declare target'.
const DeclareTargetContextInfo ActOnOpenMPEndDeclareTargetDirective();
/// Called once a target context is completed, that can be when a
/// '#pragma omp end declare target' was encountered or when a
/// '#pragma omp declare target' without declaration-definition-seq was
/// encountered.
void ActOnFinishedOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI);
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *lookupOpenMPDeclareTargetName(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Return true inside an OpenMP declare target region, i.e. while the
/// declare-target nesting stack is non-empty.
bool isInOpenMPDeclareTargetContext() const {
return !DeclareTargetNesting.empty();
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// Called for syntactical loops (ForStmt or CXXForRangeStmt) associated to
/// an OpenMP loop directive.
StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '#pragma omp tile' after parsing of its clauses and
/// the associated statement.
StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '#pragma omp unroll' after parsing of its clauses
/// and the associated statement.
StmtResult ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp scan'.
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp interop'.
StmtResult ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp dispatch' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp masked' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMaskedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type,
bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \returns None, if the function/variant function are not compatible with
/// the pragma, pair of original function/variant ref expression otherwise.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param TI The context traits associated with the function variant.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
/// Called on a well-formed OpenMP clause that carries a single expression
/// argument; dispatches to the clause-specific handler based on \p Kind.
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'sizes' clause.
OMPClause *ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'full' clause.
OMPClause *ActOnOpenMPFullClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'partial' clause.
OMPClause *ActOnOpenMPPartialClause(Expr *FactorExpr, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on a well-formed OpenMP clause whose payload is a single
/// enumeration value (\p Argument); dispatches based on \p Kind.
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on a well-formed OpenMP clause that takes both enumeration
/// arguments and a single expression (e.g. 'schedule'); dispatches based on
/// \p Kind.
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on a well-formed argument-less OpenMP clause (e.g. 'nowait');
/// dispatches based on \p Kind.
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'init' clause.
OMPClause *ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs,
bool IsTarget, bool IsTargetSync,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc,
SourceLocation EndLoc);
/// Called on well-formed 'use' clause.
OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc, SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc,
SourceLocation EndLoc);
/// Called on well-formed 'novariants' clause.
OMPClause *ActOnOpenMPNovariantsClause(Expr *Condition,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nocontext' clause.
OMPClause *ActOnOpenMPNocontextClause(Expr *Condition,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'filter' clause.
OMPClause *ActOnOpenMPFilterClause(Expr *ThreadID, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on a well-formed OpenMP clause that holds a list of variables
/// (e.g. 'private', 'map'); dispatches based on \p Kind.
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
SourceLocation ExtraModifierLoc,
ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depobj' pseudo clause.
OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *
ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'use_device_addr' clause.
OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Data for list of allocators.
struct UsesAllocatorsData {
/// Allocator.
Expr *Allocator = nullptr;
/// Allocator traits.
Expr *AllocatorTraits = nullptr;
/// Locations of '(' and ')' symbols.
SourceLocation LParenLoc, RParenLoc;
};
/// Called on well-formed 'uses_allocators' clause.
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<UsesAllocatorsData> Data);
/// Called on well-formed 'affinity' clause.
OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, Expr *Modifier,
ArrayRef<Expr *> Locators);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
/// Returns true if \p CCK describes an explicit cast rather than an
/// implicit conversion or a builtin-operator operand conversion.
static bool isCast(CheckedConversionKind CCK) {
switch (CCK) {
case CCK_CStyleCast:
case CCK_FunctionalCast:
case CCK_OtherCast:
return true;
default:
return false;
}
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult
ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_PRValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of an unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This function is a no-op if the operand has a function type
// or an array type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check whether the given statement can have musttail applied to it,
/// issuing a diagnostic and returning false if not. In the success case,
/// the statement is rewritten to remove implicit nodes from the return
/// value.
bool checkAndRewriteMustTailAttr(Stmt *St, const Attr &MTA);
private:
/// Check whether the given statement can have musttail applied to it,
/// issuing a diagnostic and returning false if not.
bool checkMustTailAttr(const Stmt *St, const Attr &MTA);
public:
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collector argument expressions for various
/// form of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on it's
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointers types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatibleFunctionPointer - The assignment is between two function
/// pointers types that are not compatible, but we accept them as an
/// extension.
IncompatibleFunctionPointer,
/// IncompatiblePointerSign - The assignment is between two pointers types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointers types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_PRValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// The following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// Type checking for binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
/// ExprResult-based convenience wrapper around the Expr*& overload of
/// FindCompositePointerType. The underlying overload may rewrite the
/// operand expressions in place; the rewritten expressions are stored
/// back into \p E1 and \p E2 before returning.
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
// Unwrap, delegate, then re-wrap the (possibly updated) expressions.
Expr *LHS = E1.get(), *RHS = E2.get();
QualType Result = FindCompositePointerType(Loc, LHS, RHS, ConvertArgs);
E1 = LHS;
E2 = RHS;
return Result;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
/// Type checking for matrix binary operators.
QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
bool IsCompAssign);
QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign);
bool isValidSveBitcast(QualType srcType, QualType destType);
bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy);
bool areVectorTypesSameSize(QualType srcType, QualType destType);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
/// The conversions that would be performed on an lvalue of type T2 when
/// binding a reference of type T1 to it, as determined when evaluating
/// whether T1 is reference-compatible with T2.
enum ReferenceConversions {
Qualification = 0x1,
NestedQualification = 0x2,
Function = 0x4,
DerivedToBase = 0x8,
ObjC = 0x10,
ObjCLifetime = 0x20,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckMatrixCast - Check type constraints for matrix casts.
// We allow casting between matrices of the same dimensions, i.e. when they
// have the same number of rows and columns. Returns true if the cast is
// invalid.
bool CheckMatrixCast(SourceRange R, QualType DestTy, QualType SrcTy,
CastKind &Kind);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// Returns true if the cast is invalid.
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// Returns the cast expr.
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// The result of Sema's processing of a condition (as used by if/while/
/// for/switch — see ConditionKind below): an optional condition variable
/// plus the condition expression, with an optional compile-time known
/// value for constexpr conditions.
class ConditionResult {
// The condition variable's declaration, if any (may be null).
Decl *ConditionVar;
// The condition expression, as a full-expression.
FullExprArg Condition;
// True if condition processing failed.
bool Invalid;
// True if the condition's value is known at compile time: a constexpr
// condition whose expression is present and not value-dependent.
bool HasKnownValue;
// The known boolean value; meaningful only when HasKnownValue is true.
bool KnownValue;
friend class Sema;
// Build a condition result; when IsConstexpr and the expression is not
// value-dependent, eagerly evaluates the condition to a known value.
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
// Build an empty result, optionally marked invalid (see ConditionError).
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
/// Return the condition variable (may be null) and the condition
/// expression.
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
/// Return the compile-time value of the condition, or None if it is not
/// known.
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
/// Produce a ConditionResult marked invalid, signalling that condition
/// processing failed.
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
// If true, suppress the diagnostics. NOTE(review): inferred from the
// name; confirm against the use sites in VerifyIntegerConstantExpression.
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
/// Diagnose that the expression's type is not suitable for an integer
/// constant expression.
virtual SemaDiagnosticBuilder
diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T);
/// Diagnose that the expression is not an integer constant expression;
/// concrete diagnosers must implement this.
virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
SourceLocation Loc) = 0;
/// Diagnose that the value was obtained by constant folding rather than
/// as a strict ICE.
virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc);
virtual ~VerifyICEDiagnoser() {}
};
enum AllowFoldKind {
NoFold,
AllowFold,
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr,
AllowFoldKind CanFold = NoFold);
/// Convenience overload: verify that \p E is an ICE, discarding its value.
ExprResult VerifyIntegerConstantExpression(Expr *E,
AllowFoldKind CanFold = NoFold) {
return VerifyIntegerConstantExpression(E, nullptr, CanFold);
}
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics
/// unless \p EmitOnBothSides is true.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc,
unsigned DiagID);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder
diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc,
unsigned DiagID, FunctionDecl *FD);
SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID,
FunctionDecl *FD = nullptr);
/// Convenience overload: forward to the DiagID-based targetDiag, then
/// stream the PartialDiagnostic's stored arguments into the builder.
SemaDiagnosticBuilder targetDiag(SourceLocation Loc,
const PartialDiagnostic &PD,
FunctionDecl *FD = nullptr) {
return targetDiag(Loc, PD.getDiagID(), FD) << PD;
}
/// Check if the expression is allowed to be used in expressions for the
/// offloading devices.
void checkDeviceDecl(ValueDecl *D, SourceLocation Loc);
/// The execution-space classification of a function in CUDA
/// (see IdentifyCUDATarget below).
enum CUDAFunctionTarget {
CFT_Device, ///< A __device__ function.
CFT_Global, ///< A __global__ kernel function.
CFT_Host, ///< A __host__ function (also returned for a null decl).
CFT_HostDevice, ///< A __host__ __device__ function.
CFT_InvalidTarget ///< An invalid combination of target attributes.
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
enum CUDAVariableTarget {
CVT_Device, ///< Emitted on device side with a shadow variable on host side
CVT_Host, ///< Emitted on host side only
CVT_Both, ///< Emitted on both sides with different addresses
CVT_Unified, ///< Emitted as a unified address, e.g. managed variables
};
/// Determines whether the given variable is emitted on host or device side.
CUDAVariableTarget IdentifyCUDATarget(const VarDecl *D);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
// dyn_cast yields null for non-function contexts; IdentifyCUDATarget
// returns CFT_Host for a null decl (see its documentation above).
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D);
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
/// May add implicit CUDAConstantAttr attribute to VD, depending on VD
/// and current compilation settings.
void MaybeAddCUDAConstantAttr(VarDecl *VD);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas are host-device functions by default unless they carry an
/// explicit host or device attribute.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Determines the preferred type of the current function argument, by
/// examining the signatures of all possible overloads.
/// Returns null if unknown or ambiguous, or if code completion is off.
///
/// If the code completion point has been reached, also reports the function
/// signatures that were considered.
///
/// FIXME: rename to GuessCallArgumentType to reduce confusion.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S, bool IsBracedThen);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteAfterFunctionEquals(Declarator &D);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto, SourceLocation Loc);
void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
StringRef ParamName, QualType ArgTy, QualType ParamTy);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg,
bool WantCDE);
bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum);
bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinComplex(CallExpr *TheCall);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinPPCMMACall(CallExpr *TheCall, const char *TypeDesc);
bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc);
// Matrix builtin handling.
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
ExprResult CallResult);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckFreeArguments(const CallExpr *E);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Peform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Nullable_result = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
  /// Retrieve the current lexical DeclContext, mapping an Objective-C
  /// category to the class interface it extends.
  const DeclContext *getCurObjCLexicalContext() const {
    const DeclContext *DC = getCurLexicalContext();
    // A category implicitly has the attribute of the interface.
    if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
      DC = CatD->getClassInterface();
    return DC;
  }
/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;
/// To be used for checking whether the arguments being passed to
/// function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
  /// RAII helper that saves and restores the pending delayed
  /// exception-specification checks around parsing of a class.
  ///
  /// Construction swaps the Sema's delayed check lists out into this object;
  /// destruction swaps them back. The destructor asserts that no new delayed
  /// checks accumulated while the saved state was active.
  class SavePendingParsedClassStateRAII {
  public:
    SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }

    ~SavePendingParsedClassStateRAII() {
      assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
             "there shouldn't be any pending delayed exception spec checks");
      assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
             "there shouldn't be any pending delayed exception spec checks");
      swapSavedState();
    }

  private:
    Sema &S;

    // Saved copies of the corresponding Sema members.
    decltype(DelayedOverridingExceptionSpecChecks)
        SavedOverridingExceptionSpecChecks;
    decltype(DelayedEquivalentExceptionSpecChecks)
        SavedEquivalentExceptionSpecChecks;

    /// Exchange the saved lists with the Sema's live lists.
    void swapSavedState() {
      SavedOverridingExceptionSpecChecks.swap(
          S.DelayedOverridingExceptionSpecChecks);
      SavedEquivalentExceptionSpecChecks.swap(
          S.DelayedEquivalentExceptionSpecChecks);
    }
  };
  /// Helper class that collects misaligned member designations and
  /// their location info for delayed diagnostics.
  struct MisalignedMember {
    Expr *E;             // the access expression being recorded
    RecordDecl *RD;      // record containing the member
    ValueDecl *MD;       // the member designated by E
    CharUnits Alignment; // alignment associated with the access

    MisalignedMember() : E(), RD(), MD(), Alignment() {}
    MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
                     CharUnits Alignment)
        : E(E), RD(RD), MD(MD), Alignment(Alignment) {}
    explicit MisalignedMember(Expr *E)
        : MisalignedMember(E, nullptr, nullptr, CharUnits()) {}

    /// Entries compare equal when they designate the same expression;
    /// the remaining fields are deliberately ignored.
    bool operator==(const MisalignedMember &m) { return this->E == m.E; }
  };
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the sef of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurLexicalContext is a kernel function or it is known that the
/// function will be emitted for the device, emits the diagnostics
/// immediately.
/// - If CurLexicalContext is a function and we are compiling
/// for the device, but we don't know that this function will be codegen'ed
/// for devive yet, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// Diagnose __float128 type usage only from SYCL device code if the current
/// target doesn't support it
/// if (!S.Context.getTargetInfo().hasFloat128Type() &&
/// S.getLangOpts().SYCLIsDevice)
/// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128";
SemaDiagnosticBuilder SYCLDiagIfDeviceCode(SourceLocation Loc,
unsigned DiagID);
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed, creates a deferred diagnostic to be emitted if
/// and when the caller is codegen'ed, and returns true.
///
/// - Otherwise, returns true without emitting any diagnostics.
///
/// Adds Callee to DeviceCallGraph if we don't know if its caller will be
/// codegen'ed yet.
bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
  Sema &Actions;
  bool Entered = true; // whether a context was pushed (and must be popped)

public:
  /// Push \p NewContext unless \p ShouldEnter is false.
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }

  /// Push \p NewContext, reusing the current lambda context declaration.
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };

  /// Enter an unevaluated-list context for a braced-init-list, but only when
  /// already inside an unevaluated operand and compiling C++11 or later.
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  /// Buffered tokens of the function body, replayed when parsing resumes.
  CachedTokens Toks;
  /// The template function declaration to be late parsed.
  Decl *D;
};
template <>
void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
AlignPackInfo Value);
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  /// Empty key: the base-info empty declaration paired with an invalid
  /// (default-constructed) SourceLocation.
  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  /// Combine the declaration's hash with the location's hash.
  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getHashValue());
  }

  /// Equal iff both the declaration and the location match.
  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};
} // namespace llvm
#endif
|
convolution_packnto1_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Convolution from a packn-packed fp16 input to an unpacked (pack1) fp16
// output, accumulating in fp32 ("fp16s": fp16 storage, fp32 arithmetic).
//
// bottom_blob      input of `channels` channels; each spatial element is a
//                  packn-wide group of fp16 values
// top_blob         pre-allocated output of `outch` plain fp16 channels
// weight_data_fp16 per-output-channel weights, read sequentially as packn
//                  fp16 values per kernel tap per input channel
// bias_data        optional fp32 per-output-channel bias (may be empty)
// The kernel/dilation/stride parameters describe the convolution geometry;
// activation_type/activation_params select the activation fused in via
// activation_ss().
static void convolution_packnto1_fp16s_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    // packn: fp16 lanes processed per vector op (vlenb is the vector register
    // width in bytes; an fp16 element is 2 bytes).
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets: offset of each kernel tap relative to the first tap,
    // in units of packn-element groups, with dilation applied.
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w; // jump to next kernel row
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const float* bias_data_ptr = bias_data;

    // num_output: one output channel per loop iteration, parallelized.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Scalar part of the accumulator, seeded with the bias.
                float sum = 0.f;

                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }

                // Vector accumulator; fp16*fp16 products are widened to fp32.
                vfloat32m2_t _sum = vfmv_v_f_f32m2(0.f, vl);

                const __fp16* kptr = weight_data_fp16.channel(p);

                // channels: accumulate every input channel and kernel tap.
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w * packn;

                    for (int k = 0; k < maxk; k++)
                    {
                        vfloat16m1_t _val = vle16_v_f16m1(sptr + space_ofs[k] * packn, vl);
                        vfloat16m1_t _w = vle16_v_f16m1(kptr, vl);
                        // widening multiply-accumulate: fp32 += fp16 * fp16
                        _sum = vfwmacc_vv_f32m2(_sum, _val, _w, vl);
                        kptr += packn;
                    }
                }

#if C906
                // TODO: C906 workaround — spill the vector accumulator to a
                // temporary buffer and reduce in scalar code.
                std::vector<float> ss(packn);
                vse32_v_f32m2((float*)ss.data(), _sum, vl);
                for (int i = 0; i < packn; i++) // note: shadows the row index i
                {
                    sum += ss[i];
                }
#else
                // Horizontal reduction of the lanes; the scalar `sum` (bias)
                // is folded in as the reduction's initial value.
                sum = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m2_f32m1(vfloat32m1_t(), _sum, vfmv_s_f_f32m1(vfloat32m1_t(), sum, vl), vl));
#endif

                sum = activation_ss(sum, activation_type, activation_params);

                outptr[j] = (__fp16)sum;
            }

            outptr += outw;
        }
    }
}
// Convolution from a packn-packed fp16 input to an unpacked (pack1) fp16
// output, accumulating directly in fp16 ("fp16sa": fp16 storage and
// arithmetic). Same structure as convolution_packnto1_fp16s_rvv, but the
// accumulator, bias and reduction stay in fp16.
static void convolution_packnto1_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data_fp16, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    // packn: fp16 lanes processed per vector op (vlenb = register bytes).
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets: offset of each kernel tap relative to the first tap,
    // in units of packn-element groups, with dilation applied.
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w; // jump to next kernel row
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const __fp16* bias_data_ptr = bias_data_fp16;

    // num_output: one output channel per loop iteration, parallelized.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Scalar part of the accumulator, seeded with the fp16 bias.
                __fp16 sum = 0.f;

                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }

                // fp16 vector accumulator.
                vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);

                const __fp16* kptr = weight_data_fp16.channel(p);

                // channels: accumulate every input channel and kernel tap.
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w * packn;

                    for (int k = 0; k < maxk; k++)
                    {
                        vfloat16m1_t _val = vle16_v_f16m1(sptr + space_ofs[k] * packn, vl);
                        vfloat16m1_t _w = vle16_v_f16m1(kptr, vl);
                        // fp16 multiply-accumulate, no widening
                        _sum = vfmacc_vv_f16m1(_sum, _val, _w, vl);
                        kptr += packn;
                    }
                }

                // Horizontal reduction; the scalar `sum` (bias) is folded in
                // as the reduction's initial value.
                sum = vfmv_f_s_f16m1_f16(vfredsum_vs_f16m1_f16m1(vfloat16m1_t(), _sum, vfmv_s_f_f16m1(vfloat16m1_t(), sum, vl), vl));

                sum = activation_ss(sum, activation_type, activation_params);

                outptr[j] = sum;
            }

            outptr += outw;
        }
    }
}
|
FullyDistVec.h | /****************************************************************/
/* Parallel Combinatorial BLAS Library (for Graph Computations) */
/* version 1.6 -------------------------------------------------*/
/* date: 6/15/2017 ---------------------------------------------*/
/* authors: Ariful Azad, Aydin Buluc --------------------------*/
/****************************************************************/
/*
Copyright (c) 2010-2017, The Regents of the University of California
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#ifndef _FULLY_DIST_VEC_H_
#define _FULLY_DIST_VEC_H_
#include <iostream>
#include <fstream>
#include <vector>
#include <utility>
#include <iterator>
#include <random>
#include "CombBLAS.h"
#include "CommGrid.h"
#include "FullyDist.h"
#include "Exception.h"
namespace combblas {
template <class IT, class NT>
class FullyDistSpVec;
template <class IT, class NT, class DER>
class SpParMat;
template <class IT>
class DistEdgeList;
template <class IU, class NU>
class DenseVectorLocalIterator;
// ABAB: As opposed to SpParMat, IT here is used to encode global size and global indices;
// therefore it can not be 32-bits, in general.
template <class IT, class NT>
class FullyDistVec: public FullyDist<IT,NT, typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type >
{
public:
FullyDistVec ( );
FullyDistVec ( IT globallen, NT initval);
FullyDistVec ( std::shared_ptr<CommGrid> grid);
FullyDistVec ( std::shared_ptr<CommGrid> grid, IT globallen, NT initval);
FullyDistVec ( const FullyDistSpVec<IT, NT> & rhs ); // Sparse -> Dense conversion constructor
FullyDistVec ( const std::vector<NT> & fillarr, std::shared_ptr<CommGrid> grid ); // initialize a FullyDistVec with a vector of length n/p from each processor
template <class ITRHS, class NTRHS>
FullyDistVec ( const FullyDistVec<ITRHS, NTRHS>& rhs ); // type converter constructor
    /// Default element I/O handler used by ReadDistribute, SaveGathered and
    /// ParallelWrite: reads and writes values with plain stream operators.
    class ScalarReadSaveHandler
    {
    public:
        /// Value used when the input supplies an index but no number.
        NT getNoNum(IT index) { return static_cast<NT>(1); }

        /// Read one element (for global index \p index) from the stream.
        template <typename c, typename t>
        NT read(std::basic_istream<c,t>& is, IT index)
        {
            NT v;
            is >> v;
            return v;
        }

        /// Write one element value; the index is not written here.
        template <typename c, typename t>
        void save(std::basic_ostream<c,t>& os, const NT& v, IT index)
        {
            os << v;
        }
    };
    /// Write the vector to \p filename in parallel by converting to a
    /// FullyDistSpVec and delegating to its ParallelWrite.
    /// \param onebased       emit 1-based indices when true
    /// \param handler        formats each value (see ScalarReadSaveHandler)
    /// \param includeindices also emit the index alongside each value
    template <class HANDLER>
    void ParallelWrite(const std::string & filename, bool onebased, HANDLER handler, bool includeindices = true)
    {
        FullyDistSpVec<IT,NT> tmpSpVec = *this; // delegate
        tmpSpVec.ParallelWrite(filename, onebased, handler, includeindices);
    }
    /// Convenience overload using the default scalar handler.
    void ParallelWrite(const std::string & filename, bool onebased, bool includeindices = true) { ParallelWrite(filename, onebased, ScalarReadSaveHandler(), includeindices); };

    /// Read the vector from \p filename by delegating to the sparse vector's
    /// ParallelRead and densifying the result. BinOp is forwarded to the
    /// sparse reader (presumably to combine entries with duplicate indices —
    /// confirm against FullyDistSpVec::ParallelRead).
    template <typename _BinaryOperation>
    void ParallelRead (const std::string & filename, bool onebased, _BinaryOperation BinOp)
    {
        FullyDistSpVec<IT,NT> tmpSpVec = *this; // delegate
        tmpSpVec.ParallelRead(filename, onebased, BinOp);
        *this = tmpSpVec; // sparse -> dense conversion
    }
template <class HANDLER>
std::ifstream& ReadDistribute (std::ifstream& infile, int master, HANDLER handler);
std::ifstream& ReadDistribute (std::ifstream& infile, int master) { return ReadDistribute(infile, master, ScalarReadSaveHandler()); }
template <class HANDLER>
void SaveGathered(std::ofstream& outfile, int master, HANDLER handler, bool printProcSplits = false);
void SaveGathered(std::ofstream& outfile, int master) { SaveGathered(outfile, master, ScalarReadSaveHandler(), false); }
template <class ITRHS, class NTRHS>
FullyDistVec<IT,NT> & operator=(const FullyDistVec< ITRHS,NTRHS > & rhs); // assignment with type conversion
FullyDistVec<IT,NT> & operator=(const FullyDistVec<IT,NT> & rhs); //!< Actual assignment operator
FullyDistVec<IT,NT> & operator=(const FullyDistSpVec<IT,NT> & rhs); //!< FullyDistSpVec->FullyDistVec conversion operator
FullyDistVec<IT,NT> & operator=(NT fixedval) // assign fixed value
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(IT i=0; i < arr.size(); ++i)
arr[i] = fixedval;
return *this;
}
FullyDistVec<IT,NT> operator() (const FullyDistVec<IT,IT> & ri) const; //<! subsref
FullyDistVec<IT,NT> & operator+=(const FullyDistSpVec<IT,NT> & rhs);
FullyDistVec<IT,NT> & operator+=(const FullyDistVec<IT,NT> & rhs);
FullyDistVec<IT,NT> & operator-=(const FullyDistSpVec<IT,NT> & rhs);
FullyDistVec<IT,NT> & operator-=(const FullyDistVec<IT,NT> & rhs);
bool operator==(const FullyDistVec<IT,NT> & rhs) const;
void SetElement (IT indx, NT numx); // element-wise assignment
void SetLocalElement(IT index, NT value) { arr[index] = value; }; // no checks, local index
NT GetElement (IT indx) const; // element-wise fetch
NT operator[](IT indx) const // more c++ like API
{
return GetElement(indx);
}
void Set(const FullyDistSpVec< IT,NT > & rhs);
template <class NT1, typename _BinaryOperationIdx, typename _BinaryOperationVal>
void GSet (const FullyDistSpVec<IT,NT1> & spVec, _BinaryOperationIdx __binopIdx, _BinaryOperationVal __binopVal, MPI_Win win);
template <class NT1, typename _BinaryOperationIdx>
FullyDistSpVec<IT,NT> GGet (const FullyDistSpVec<IT,NT1> & spVec, _BinaryOperationIdx __binopIdx, NT nullValue);
void iota(IT globalsize, NT first);
void RandPerm(); // randomly permute the vector
FullyDistVec<IT,IT> sort(); // sort and return the permutation
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::LengthUntil;
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::TotalLength;
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::Owner;
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::MyLocLength;
IT LocArrSize() const { return arr.size(); } // = MyLocLength() once arr is resized
//TODO: we should change this function and return the vector directly
const NT * GetLocArr() const { return arr.data(); } // = MyLocLength() once arr is resized
const std::vector<NT>& GetLocVec() const { return arr; }
template <typename _Predicate>
FullyDistSpVec<IT,NT> Find(_Predicate pred) const; //!< Return the elements for which pred is true
FullyDistSpVec<IT,NT> Find(NT val) const; //!< Return the elements val is found
template <typename _Predicate>
FullyDistVec<IT,IT> FindInds(_Predicate pred) const; //!< Return the indices where pred is true
template <typename _Predicate>
IT Count(_Predicate pred) const; //!< Return the number of elements for which pred is true
template <typename _UnaryOperation>
void Apply(_UnaryOperation __unary_op)
{
std::transform(arr.begin(), arr.end(), arr.begin(), __unary_op);
}
template <typename _BinaryOperation>
void ApplyInd(_BinaryOperation __binary_op)
{
IT offset = LengthUntil();
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(size_t i=0; i < arr.size(); ++i)
arr[i] = __binary_op(arr[i], i + offset);
}
template <typename _UnaryOperation, typename IRRELEVANT_NT>
void Apply(_UnaryOperation __unary_op, const FullyDistSpVec<IT,IRRELEVANT_NT>& mask);
// extended callback versions
template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
void EWiseApply(const FullyDistVec<IT,NT2> & other, _BinaryOperation __binary_op, _BinaryPredicate _do_op, const bool useExtendedBinOp);
template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
void EWiseApply(const FullyDistSpVec<IT,NT2> & other, _BinaryOperation __binary_op, _BinaryPredicate _do_op, bool applyNulls, NT2 nullValue, const bool useExtendedBinOp);
// plain fallback versions
template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
void EWiseApply(const FullyDistVec<IT,NT2> & other, _BinaryOperation __binary_op, _BinaryPredicate _do_op)
{
EWiseApply(other,
EWiseExtToPlainAdapter<NT, NT, NT2, _BinaryOperation>(__binary_op),
EWiseExtToPlainAdapter<bool, NT, NT2, _BinaryPredicate>(_do_op),
true);
}
template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
void EWiseApply(const FullyDistSpVec<IT,NT2> & other, _BinaryOperation __binary_op, _BinaryPredicate _do_op, bool applyNulls, NT2 nullValue)
{
EWiseApply(other,
EWiseExtToPlainAdapter<NT, NT, NT2, _BinaryOperation>(__binary_op),
EWiseExtToPlainAdapter<bool, NT, NT2, _BinaryPredicate>(_do_op),
applyNulls, nullValue, true);
}
template <typename T1, typename T2>
class retTrue {
public:
bool operator()(const T1& x, const T2& y)
{
return true;
}
};
template <typename _BinaryOperation, class NT2>
void EWiseApply(const FullyDistVec<IT,NT2> & other, _BinaryOperation __binary_op)
{
this->EWiseApply(other, __binary_op, retTrue<NT, NT2>());
}
template <typename _BinaryOperation, class NT2>
void EWiseApply(const FullyDistSpVec<IT,NT2> & other, _BinaryOperation __binary_op, bool applyNulls, NT2 nullValue)
{
this->EWiseApply(other, __binary_op, retTrue<NT, NT2>(), applyNulls, nullValue);
}
void PrintToFile(std::string prefix)
{
std::ofstream output;
commGrid->OpenDebugFile(prefix, output);
std::copy(arr.begin(), arr.end(), std::ostream_iterator<NT> (output, " "));
output << std::endl;
output.close();
}
void PrintInfo(std::string vectorname) const;
void DebugPrint();
std::shared_ptr<CommGrid> getcommgrid() const { return commGrid; }
std::pair<IT, NT> MinElement() const; // returns <index, value> pair of global minimum
template <typename _BinaryOperation>
NT Reduce(_BinaryOperation __binary_op, NT identity) const; //! Reduce can be used to implement max_element, for instance
template <typename OUT, typename _BinaryOperation, typename _UnaryOperation>
OUT Reduce(_BinaryOperation __binary_op, OUT default_val, _UnaryOperation __unary_op) const;
void SelectCandidates(double nver);
template <typename _BinaryOperation, typename OUT = typename std::result_of<_BinaryOperation&(NT,NT)>::type>
void EWiseOut(const FullyDistVec<IT,NT> & rhs, _BinaryOperation __binary_op, FullyDistVec<IT,OUT> & result);
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::glen;
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::commGrid;
private:
std::vector< NT > arr;
template <typename _BinaryOperation>
void EWise(const FullyDistVec<IT,NT> & rhs, _BinaryOperation __binary_op);
template <class IU, class NU>
friend class DenseParMat;
template <class IU, class NU, class UDER>
friend class SpParMat;
template <class IU, class NU>
friend class FullyDistVec;
template <class IU, class NU>
friend class FullyDistSpVec;
template <class IU, class NU>
friend class DenseVectorLocalIterator;
template <typename SR, typename IU, typename NUM, typename NUV, typename UDER>
friend FullyDistVec<IU,typename promote_trait<NUM,NUV>::T_promote>
SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistVec<IU,NUV> & x );
template <typename IU, typename NU1, typename NU2>
friend FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote>
EWiseMult (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , bool exclude, NU2 zero);
template <typename IU, typename NU1, typename NU2, typename _BinaryOperation>
friend FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote>
EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, typename promote_trait<NU1,NU2>::T_promote zero);
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
friend FullyDistSpVec<IU,RET>
EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp);
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
friend FullyDistSpVec<IU,RET>
EWiseApply_threaded (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp);
template <typename IU>
friend void RenameVertices(DistEdgeList<IU> & DEL);
template <typename IU, typename NU>
friend FullyDistVec<IU,NU> Concatenate ( std::vector< FullyDistVec<IU,NU> > & vecs);
template <typename IU, typename NU>
friend void Augment (FullyDistVec<int64_t, int64_t>& mateRow2Col, FullyDistVec<int64_t, int64_t>& mateCol2Row,
FullyDistVec<int64_t, int64_t>& parentsRow, FullyDistVec<int64_t, int64_t>& leaves);
template <class IU, class DER>
friend SpParMat<IU, bool, DER> PermMat (const FullyDistVec<IU,IU> & ri, const IU ncol);
friend void maximumMatching(SpParMat < int64_t, bool, SpDCCols<int64_t,bool> > & A, FullyDistVec<int64_t, int64_t>& mateRow2Col,FullyDistVec<int64_t, int64_t>& mateCol2Row);
};
}
#include "FullyDistVec.cpp"
#endif
|
triangleCount.c | // -----------------------------------------------------------------------------
//
// "00_AccelGraph"
//
// -----------------------------------------------------------------------------
// Copyright (c) 2014-2019 All rights reserved
// -----------------------------------------------------------------------------
// Author : Abdullah Mughrabi
// Email : atmughra@ncsu.edu||atmughrabi@gmail.com
// File : triangleCount.c
// Create : 2019-06-29 12:31:24
// Revise : 2019-09-28 15:34:11
// Editor : Abdullah Mughrabi
// -----------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#include "timer.h"
#include "myMalloc.h"
#include "boolean.h"
#include "arrayQueue.h"
#include "bitmap.h"
#include "reorder.h"
#include "graphConfig.h"
#include "graphCSR.h"
#include "graphGrid.h"
#include "graphAdjArrayList.h"
#include "graphAdjLinkedList.h"
#include "triangleCount.h"
// Allocate a TCStats for a CSR graph with all per-vertex counts zeroed.
struct TCStats *newTCStatsGraphCSR(struct GraphCSR *graph)
{
    struct TCStats *stats = (struct TCStats *) my_malloc(sizeof(struct TCStats));

    stats->total_counts = 0;
    stats->num_vertices = graph->num_vertices;
    stats->time_total = 0.0f;
    stats->counts = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));

    uint32_t i;
    #pragma omp parallel for default(none) private(i) shared(stats)
    for(i = 0; i < stats->num_vertices; i++)
    {
        stats->counts[i] = 0;
    }

    return stats;
}
// Allocate a TCStats for a Grid graph with all per-vertex counts zeroed.
struct TCStats *newTCStatsGraphGrid(struct GraphGrid *graph)
{
    struct TCStats *stats = (struct TCStats *) my_malloc(sizeof(struct TCStats));

    stats->total_counts = 0;
    stats->num_vertices = graph->num_vertices;
    stats->time_total = 0.0f;
    stats->counts = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));

    uint32_t i;
    #pragma omp parallel for default(none) private(i) shared(stats)
    for(i = 0; i < stats->num_vertices; i++)
    {
        stats->counts[i] = 0;
    }

    return stats;
}
// Allocate a TCStats for an adjacency-array-list graph, counts zeroed.
struct TCStats *newTCStatsGraphAdjArrayList(struct GraphAdjArrayList *graph)
{
    struct TCStats *stats = (struct TCStats *) my_malloc(sizeof(struct TCStats));

    stats->total_counts = 0;
    stats->num_vertices = graph->num_vertices;
    stats->time_total = 0.0f;
    stats->counts = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));

    uint32_t i;
    #pragma omp parallel for default(none) private(i) shared(stats)
    for(i = 0; i < stats->num_vertices; i++)
    {
        stats->counts[i] = 0;
    }

    return stats;
}
// Allocate a TCStats for an adjacency-linked-list graph, counts zeroed.
struct TCStats *newTCStatsGraphAdjLinkedList(struct GraphAdjLinkedList *graph)
{
    struct TCStats *stats = (struct TCStats *) my_malloc(sizeof(struct TCStats));

    stats->total_counts = 0;
    stats->num_vertices = graph->num_vertices;
    stats->time_total = 0.0f;
    stats->counts = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));

    uint32_t i;
    #pragma omp parallel for default(none) private(i) shared(stats)
    for(i = 0; i < stats->num_vertices; i++)
    {
        stats->counts[i] = 0;
    }

    return stats;
}
/*
 * Release a TCStats object and its counts array.
 * Safe to call with NULL. The original guarded free(stats->counts) with a
 * null-check, which is redundant: free(NULL) is a no-op per the C standard.
 */
void freeTCStats(struct TCStats *stats)
{
    if(stats)
    {
        free(stats->counts);
        free(stats);
    }
}
// ********************************************************************************************
// *************** Helper Functions **************
// ********************************************************************************************
// Return the node with the strictly smaller out-degree; ties favor node_u.
uint32_t minTwoNodes(uint32_t node_v, uint32_t node_u, uint32_t degree_v, uint32_t degree_u)
{
    return (degree_v < degree_u) ? node_v : node_u;
}
// Return the node with the strictly larger out-degree; ties favor node_v.
uint32_t maxTwoNodes(uint32_t node_v, uint32_t node_u, uint32_t degree_v, uint32_t degree_u)
{
    return (degree_u > degree_v) ? node_u : node_v;
}
/*
 * Count common neighbors of u and v: for each neighbor of v not exceeding v,
 * binary-search u's (sorted) neighbor list for it.
 *
 * Fixes vs. original:
 *  - Guard degree_comp == 0. The original computed (top - 1) with top == 0,
 *    which wraps around for unsigned (effectively UINT32_MAX) and also read
 *    edges_array_dest[edge_idx_comp + 0] out of bounds for a zero-degree vertex.
 *  - Removed the loop-invariant re-read of u_iter inside the while loop
 *    (iter does not change there, so the value cannot change).
 *
 * Assumes neighbor lists in sorted_edges_array are sorted ascending
 * (required for the binary search and the early break on u_iter > v).
 */
uint32_t countIntersectionsBinarySearch(uint32_t u, uint32_t v, struct GraphCSR *graph)
{
    uint32_t count = 0;
    uint32_t degree_iter = graph->vertices->out_degree[v];
    uint32_t edge_idx_iter = graph->vertices->edges_idx[v];
    uint32_t degree_comp = graph->vertices->out_degree[u];
    uint32_t edge_idx_comp = graph->vertices->edges_idx[u];
    uint32_t iter;

    if(degree_comp == 0) // nothing to intersect with; also avoids (top - 1) underflow
        return 0;

    for(iter = edge_idx_iter; iter < (edge_idx_iter + degree_iter); iter++)
    {
        uint32_t u_iter = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[iter]);
        if(u_iter > v) // neighbors are sorted; the rest are all > v
            break;

        uint32_t bottom = 0;
        uint32_t top = degree_comp;
        uint32_t mid = (top + bottom) >> 1;
        uint32_t v_comp = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[edge_idx_comp + mid]);

        // Invariant: the match, if any, lies in [bottom, top).
        while(bottom < (top - 1))
        {
            if(u_iter < v_comp)
            {
                top = mid;
            }
            else if(u_iter > v_comp)
            {
                bottom = mid;
            }
            else
            {
                count++;
                break;
            }
            mid = (top + bottom) >> 1;
            v_comp = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[edge_idx_comp + mid]);
        }
        // Single-slot window (top == 1 with bottom == 0): check it directly.
        if((top - 1) == 0 && u_iter == v_comp)
            count++;
    }
    return count;
}
// ********************************************************************************************
// *************** CSR DataStructure **************
// ********************************************************************************************
// Dispatch to the CSR triangle-count kernel selected by arguments->pushpull.
struct TCStats *triangleCountGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
    switch (arguments->pushpull)
    {
    case 0:  // basic slow
        return triangleCountBasicGraphCSR(arguments, graph);
    case 1:  // pull
        return triangleCountPullGraphCSR(arguments, graph);
    case 2:  // push
        return triangleCountPushGraphCSR(arguments, graph);
    case 3:  // With binary intersection
        return triangleCountBinaryIntersectionGraphCSR(arguments, graph);
    default: // pull
        return triangleCountPullGraphCSR(arguments, graph);
    }
}
/*
 * Basic (unordered) triangle counting over CSR: for every edge (u,v) and every
 * neighbor w of v, scan u's neighbor list for w. Every triangle is discovered
 * once per ordered corner pair, hence the final division by 6.
 *
 * Cleanups vs. original: removed a dead pre-loop read of node_iter, removed
 * duplicate locals (degree_iter/edge_idx_iter mirrored degree_u/edge_idx_u),
 * and deleted commented-out dead code. No atomic is needed on counts[u]
 * because iteration u is executed by exactly one thread.
 */
struct TCStats *triangleCountBasicGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
    uint32_t u;
    uint64_t counts = 0;

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting Triangle Count-basic");
    printf(" -----------------------------------------------------\n");
    printf("| %-21s | %-27s | \n", "Triangle Counts", "Time (S)");
    printf(" -----------------------------------------------------\n");

    struct TCStats *stats = newTCStatsGraphCSR(graph);
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));

    Start(timer);
    #pragma omp parallel for shared(stats) schedule(dynamic, 128)
    for(u = 0; u < graph->num_vertices; u++)
    {
        uint32_t degree_u = graph->vertices->out_degree[u];
        uint32_t edge_idx_u = graph->vertices->edges_idx[u];
        uint32_t v;
        for(v = edge_idx_u; v < (edge_idx_u + degree_u); v++)
        {
            uint32_t node_v = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[v]);
            uint32_t degree_v = graph->vertices->out_degree[node_v];
            uint32_t edge_idx_v = graph->vertices->edges_idx[node_v];
            uint32_t w;
            for(w = edge_idx_v; w < (edge_idx_v + degree_v); w++)
            {
                uint32_t node_w = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[w]);
                uint32_t iter;
                // Linear scan of u's neighbors for node_w closes the triangle u-v-w.
                for(iter = edge_idx_u; iter < (edge_idx_u + degree_u); iter++)
                {
                    uint32_t node_iter = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[iter]);
                    if(node_iter == node_w)
                    {
                        stats->counts[u]++; // only the thread owning u writes counts[u]
                    }
                }
            }
        }
    }
    Stop(timer);
    stats->time_total = Seconds(timer);

    #pragma omp parallel for default(none) reduction (+ : counts) private(u) shared(stats)
    for(u = 0; u < stats->num_vertices; u++)
    {
        counts += stats->counts[u];
    }
    stats->total_counts = counts / 6; // each triangle is counted 6 times (3 corners x 2 directions)

    printf("| %-21lu | %-27f | \n", stats->total_counts, stats->time_total);
    printf(" -----------------------------------------------------\n");

    free(timer);
    return stats;
}
/*
 * Pull-direction triangle counting over CSR. Requires neighbor lists in
 * sorted_edges_array to be sorted ascending: the node_v > u, node_w > node_v
 * and node_iter >= node_w cut-offs enumerate each triangle exactly once,
 * so no division at the end.
 *
 * Fix vs. original: removed the `steps` counter. It was incremented inside
 * the parallel region on a shared variable with no reduction/private clause
 * (a data race) and its value was never read.
 */
struct TCStats *triangleCountPullGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
    uint32_t u;
    uint64_t counts = 0;

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting Triangle Count-PULL");
    printf(" -----------------------------------------------------\n");
    printf("| %-21s | %-27s | \n", "Triangle Counts", "Time (S)");
    printf(" -----------------------------------------------------\n");

    struct TCStats *stats = newTCStatsGraphCSR(graph);
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));

    Start(timer);
    #pragma omp parallel for shared(stats) reduction(+:counts) schedule(dynamic, 128)
    for(u = 0; u < graph->num_vertices; u++)
    {
        uint32_t degree_u = graph->vertices->out_degree[u];
        uint32_t edge_idx_u = graph->vertices->edges_idx[u];
        uint32_t v;
        for(v = edge_idx_u; v < (edge_idx_u + degree_u); v++)
        {
            uint32_t node_v = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[v]);
            uint32_t degree_v = graph->vertices->out_degree[node_v];
            if(node_v > u) // sorted neighbors: only consider v < u
                break;
            uint32_t edge_idx_v = graph->vertices->edges_idx[node_v];
            uint32_t w;
            uint32_t degree_iter = graph->vertices->out_degree[u];
            uint32_t edge_idx_iter = graph->vertices->edges_idx[u];
            uint32_t iter;
            for(w = edge_idx_v; w < (edge_idx_v + degree_v); w++)
            {
                uint32_t node_w = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[w]);
                if(node_w > node_v) // only consider w < v (< u)
                    break;
                // Merge-style scan of u's sorted neighbors up to node_w.
                uint32_t node_iter = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[edge_idx_iter]);
                for(iter = edge_idx_iter; iter < (edge_idx_iter + degree_iter); iter++)
                {
                    node_iter = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[iter]);
                    if(node_iter >= node_w)
                        break;
                }
                if(node_w == node_iter)
                {
                    counts++;
                }
            }
        }
    }
    Stop(timer);
    stats->time_total = Seconds(timer);
    stats->total_counts = counts;

    printf("| %-21lu | %-27f | \n", stats->total_counts, stats->time_total);
    printf(" -----------------------------------------------------\n");

    free(timer);
    return stats;
}
/*
 * Push-direction triangle counting over CSR: same u > v > w ordered
 * enumeration as the pull version, but each discovered triangle is credited
 * to counts[node_w] (the smallest vertex) rather than accumulated locally,
 * so the per-vertex update needs an atomic.
 * Assumes neighbor lists in sorted_edges_array are sorted ascending
 * (the break conditions below depend on it) -- TODO confirm upstream sort.
 */
struct TCStats *triangleCountPushGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
uint32_t u;
uint64_t counts = 0;
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting Triangle Count-PUSH");
printf(" -----------------------------------------------------\n");
printf("| %-21s | %-27s | \n", "Triangle Counts", "Time (S)");
printf(" -----------------------------------------------------\n");
struct TCStats *stats = newTCStatsGraphCSR(graph);
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
Start(timer);
#pragma omp parallel for shared(stats) schedule(dynamic, 128)
for(u = 0; u < graph->num_vertices; u++)
{
uint32_t degree_u = graph->vertices->out_degree[u];
uint32_t edge_idx_u = graph->vertices->edges_idx[u];
uint32_t v;
for(v = edge_idx_u; v < (edge_idx_u + degree_u) ; v++)
{
uint32_t node_v = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[v]);
// sorted neighbors: only consider v < u
if(node_v > u)
break;
uint32_t degree_v = graph->vertices->out_degree[node_v];
uint32_t edge_idx_v = graph->vertices->edges_idx[node_v];
uint32_t w;
uint32_t degree_iter = graph->vertices->out_degree[u];
uint32_t edge_idx_iter = graph->vertices->edges_idx[u];
uint32_t iter;
for(w = edge_idx_v; w < (edge_idx_v + degree_v) ; w++)
{
uint32_t node_w = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[w]);
// only consider w < v (< u), so each triangle is found once
if(node_w > node_v)
break;
// scan u's sorted neighbor list up to node_w; a hit closes the triangle
uint32_t node_iter = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[edge_idx_iter]);
for(iter = edge_idx_iter; iter < (edge_idx_iter + degree_iter) ; iter++)
{
node_iter = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[iter]);
if(node_iter >= node_w)
break;
}
if(node_w == node_iter)
{
// multiple threads may credit the same node_w concurrently
#pragma omp atomic update
stats->counts[node_w]++;
}
}
}
}
Stop(timer);
stats->time_total = Seconds(timer);
// fold the per-vertex credits into the global total (no /6: ordered enumeration)
#pragma omp parallel for default(none) reduction (+ : counts) private(u) shared(stats)
for(u = 0; u < stats->num_vertices; u++)
{
counts += stats->counts[u];
}
stats->total_counts = counts;
printf("| %-21lu | %-27f | \n", stats->total_counts, stats->time_total);
printf(" -----------------------------------------------------\n");
free(timer);
return stats;
}
/*
 * Triangle counting over CSR using per-edge neighbor-list intersection via
 * binary search (countIntersectionsBinarySearch). Only edges with node_v <= u
 * are processed (sorted neighbor lists), avoiding double counting.
 *
 * Fix vs. original: removed the `steps` counter. It was incremented inside
 * the parallel region on a shared variable with no reduction/private clause
 * (a data race) and its value was never read.
 */
struct TCStats *triangleCountBinaryIntersectionGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
    uint32_t u;
    uint64_t counts = 0;

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting Triangle Binary-Intersection");
    printf(" -----------------------------------------------------\n");
    printf("| %-21s | %-27s | \n", "Triangle Counts", "Time (S)");
    printf(" -----------------------------------------------------\n");

    struct TCStats *stats = newTCStatsGraphCSR(graph);
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));

    Start(timer);
    #pragma omp parallel for shared(stats) reduction(+:counts) schedule(dynamic, 128)
    for(u = 0; u < graph->num_vertices; u++)
    {
        uint32_t degree_u = graph->vertices->out_degree[u];
        uint32_t edge_idx_u = graph->vertices->edges_idx[u];
        uint32_t v;
        for(v = edge_idx_u; v < (edge_idx_u + degree_u); v++)
        {
            uint32_t node_v = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[v]);
            if(node_v > u) // sorted neighbors: remaining edges all have node_v > u
                break;
            counts += countIntersectionsBinarySearch(u, node_v, graph);
        }
    }
    Stop(timer);
    stats->time_total = Seconds(timer);
    stats->total_counts = counts;

    printf("| %-21lu | %-27f | \n", stats->total_counts, stats->time_total);
    printf(" -----------------------------------------------------\n");

    free(timer);
    return stats;
}
// ********************************************************************************************
// *************** GRID DataStructure **************
// ********************************************************************************************
// Dispatch to the Grid triangle-count kernel selected by arguments->pushpull.
struct TCStats *triangleCountGraphGrid(struct Arguments *arguments, struct GraphGrid *graph)
{
    switch (arguments->pushpull)
    {
    case 0:  // pull
        return triangleCountRowGraphGrid(graph);
    case 1:  // push
        return triangleCountColumnGraphGrid(graph);
    default: // pull
        return triangleCountRowGraphGrid(graph);
    }
}
struct TCStats *triangleCountRowGraphGrid(struct GraphGrid *graph)
{
uint64_t counts = 0;
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting Triangle Count To Be Implemented");
printf(" -----------------------------------------------------\n");
printf("| %-21s | %-27s | \n", "Triangle Counts", "Time (S)");
printf(" -----------------------------------------------------\n");
struct TCStats *stats = newTCStatsGraphGrid(graph);
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
Stop(timer);
stats->time_total = Seconds(timer);
stats->total_counts = counts;
printf("| %-21lu | %-27f | \n", stats->total_counts, stats->time_total);
printf(" -----------------------------------------------------\n");
return stats;
}
/*
 * Placeholder: column-wise triangle counting on the Grid layout is not
 * implemented yet; reports a zero count and (near-)zero time.
 * Fixes vs. original: missing Start(timer) before Stop(timer) (Seconds()
 * otherwise reads indeterminate memory) and missing free(timer) (leak).
 */
struct TCStats *triangleCountColumnGraphGrid(struct GraphGrid *graph)
{
    uint64_t counts = 0;
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting Triangle Count To Be Implemented");
    printf(" -----------------------------------------------------\n");
    printf("| %-21s | %-27s | \n", "Triangle Counts", "Time (S)");
    printf(" -----------------------------------------------------\n");
    struct TCStats *stats = newTCStatsGraphGrid(graph);
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    Start(timer); // was missing
    Stop(timer);
    stats->time_total = Seconds(timer);
    stats->total_counts = counts;
    printf("| %-21lu | %-27f | \n", stats->total_counts, stats->time_total);
    printf(" -----------------------------------------------------\n");
    free(timer); // was leaked
    return stats;
}
// ********************************************************************************************
// *************** ArrayList DataStructure **************
// ********************************************************************************************
/*
 * Dispatch to the adjacency-array-list triangle-count kernel selected by
 * arguments->pushpull.
 * Fix vs. original: `case 1` (push) called the Pull variant — an apparent
 * copy-paste slip, since triangleCountPushGraphAdjArrayList exists and the
 * LinkedList dispatcher routes case 1 to its Push variant.
 */
struct TCStats *triangleCountGraphAdjArrayList(struct Arguments *arguments, struct GraphAdjArrayList *graph)
{
    struct TCStats *stats = NULL;
    switch (arguments->pushpull)
    {
    case 0: // pull
        stats = triangleCountPullGraphAdjArrayList(graph);
        break;
    case 1: // push
        stats = triangleCountPushGraphAdjArrayList(graph);
        break;
    default:// pull
        stats = triangleCountPullGraphAdjArrayList(graph);
        break;
    }
    return stats;
}
/*
 * Placeholder: pull triangle counting on the adjacency-array-list layout is
 * not implemented yet; reports a zero count.
 * Fixes vs. original: missing Start(timer) before Stop(timer) (Seconds()
 * otherwise reads indeterminate memory) and missing free(timer) (leak).
 */
struct TCStats *triangleCountPullGraphAdjArrayList(struct GraphAdjArrayList *graph)
{
    uint64_t counts = 0;
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting Triangle Count To Be Implemented");
    printf(" -----------------------------------------------------\n");
    printf("| %-21s | %-27s | \n", "Triangle Counts", "Time (S)");
    printf(" -----------------------------------------------------\n");
    struct TCStats *stats = newTCStatsGraphAdjArrayList(graph);
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    Start(timer); // was missing
    Stop(timer);
    stats->time_total = Seconds(timer);
    stats->total_counts = counts;
    printf("| %-21lu | %-27f | \n", stats->total_counts, stats->time_total);
    printf(" -----------------------------------------------------\n");
    free(timer); // was leaked
    return stats;
}
/*
 * Placeholder: push triangle counting on the adjacency-array-list layout is
 * not implemented yet; reports a zero count.
 * Fixes vs. original: missing Start(timer) before Stop(timer) (Seconds()
 * otherwise reads indeterminate memory) and missing free(timer) (leak).
 */
struct TCStats *triangleCountPushGraphAdjArrayList(struct GraphAdjArrayList *graph)
{
    uint64_t counts = 0;
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting Triangle Count To Be Implemented");
    printf(" -----------------------------------------------------\n");
    printf("| %-21s | %-27s | \n", "Triangle Counts", "Time (S)");
    printf(" -----------------------------------------------------\n");
    struct TCStats *stats = newTCStatsGraphAdjArrayList(graph);
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    Start(timer); // was missing
    Stop(timer);
    stats->time_total = Seconds(timer);
    stats->total_counts = counts;
    printf("| %-21lu | %-27f | \n", stats->total_counts, stats->time_total);
    printf(" -----------------------------------------------------\n");
    free(timer); // was leaked
    return stats;
}
// ********************************************************************************************
// *************** LinkedList DataStructure **************
// ********************************************************************************************
// Dispatch to the adjacency-linked-list triangle-count kernel selected by
// arguments->pushpull.
struct TCStats *triangleCountGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph)
{
    switch (arguments->pushpull)
    {
    case 0:  // pull
        return triangleCountPullGraphAdjLinkedList(graph);
    case 1:  // push
        return triangleCountPushGraphAdjLinkedList(graph);
    default: // pull
        return triangleCountPullGraphAdjLinkedList(graph);
    }
}
/*
 * Placeholder: pull triangle counting on the adjacency-linked-list layout is
 * not implemented yet; reports a zero count.
 * Fixes vs. original: missing Start(timer) before Stop(timer) (Seconds()
 * otherwise reads indeterminate memory) and missing free(timer) (leak).
 */
struct TCStats *triangleCountPullGraphAdjLinkedList(struct GraphAdjLinkedList *graph)
{
    uint64_t counts = 0;
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting Triangle Count To Be Implemented");
    printf(" -----------------------------------------------------\n");
    printf("| %-21s | %-27s | \n", "Triangle Counts", "Time (S)");
    printf(" -----------------------------------------------------\n");
    struct TCStats *stats = newTCStatsGraphAdjLinkedList(graph);
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    Start(timer); // was missing
    Stop(timer);
    stats->time_total = Seconds(timer);
    stats->total_counts = counts;
    printf("| %-21lu | %-27f | \n", stats->total_counts, stats->time_total);
    printf(" -----------------------------------------------------\n");
    free(timer); // was leaked
    return stats;
}
/*
 * Placeholder: push triangle counting on the adjacency-linked-list layout is
 * not implemented yet; reports a zero count.
 * Fixes vs. original: missing Start(timer) before Stop(timer) (Seconds()
 * otherwise reads indeterminate memory) and missing free(timer) (leak).
 */
struct TCStats *triangleCountPushGraphAdjLinkedList(struct GraphAdjLinkedList *graph)
{
    uint64_t counts = 0;
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting Triangle Count To Be Implemented");
    printf(" -----------------------------------------------------\n");
    printf("| %-21s | %-27s | \n", "Triangle Counts", "Time (S)");
    printf(" -----------------------------------------------------\n");
    struct TCStats *stats = newTCStatsGraphAdjLinkedList(graph);
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    Start(timer); // was missing
    Stop(timer);
    stats->time_total = Seconds(timer);
    stats->total_counts = counts;
    printf("| %-21lu | %-27f | \n", stats->total_counts, stats->time_total);
    printf(" -----------------------------------------------------\n");
    free(timer); // was leaked
    return stats;
}
ParFriends.h | /****************************************************************/
/* Parallel Combinatorial BLAS Library (for Graph Computations) */
/* version 1.6 -------------------------------------------------*/
/* date: 6/15/2017 ---------------------------------------------*/
/* authors: Ariful Azad, Aydin Buluc --------------------------*/
/****************************************************************/
/*
Copyright (c) 2010-2017, The Regents of the University of California
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#ifndef _PAR_FRIENDS_H_
#define _PAR_FRIENDS_H_
#include "mpi.h"
#include <iostream>
#include <cstdarg>
#include "SpParMat.h"
#include "SpParHelper.h"
#include "MPIType.h"
#include "Friends.h"
#include "OptBuf.h"
#include "mtSpGEMM.h"
#include "MultiwayMerge.h"
#include <type_traits>
namespace combblas {
template <class IT, class NT, class DER>
class SpParMat;
/*************************************************************************************************/
/**************************** FRIEND FUNCTIONS FOR PARALLEL CLASSES ******************************/
/*************************************************************************************************/
/**
** Concatenate all the FullyDistVec<IT,NT> objects into a single one
**/
template <typename IT, typename NT>
FullyDistVec<IT,NT> Concatenate ( std::vector< FullyDistVec<IT,NT> > & vecs)
{
    // Concatenates the given distributed vectors back to back into a single
    // FullyDistVec with global length equal to the sum of the input lengths.
    // Preconditions: all inputs live on the same communication grid (aborts
    // with GRIDMISMATCH otherwise). Returns an empty vector when input is empty.
    if(vecs.empty())
    {
        SpParHelper::Print("Warning: Nothing to concatenate, returning empty ");
        return FullyDistVec<IT,NT>();
    }
    else if (vecs.size() < 2)
    {
        // BUGFIX: a single input used to return vecs[1], which is an
        // out-of-bounds access; the only element lives at index 0.
        return vecs[0];
    }
    else
    {
        typename std::vector< FullyDistVec<IT,NT> >::iterator it = vecs.begin();
        std::shared_ptr<CommGrid> commGridPtr = it->getcommgrid();
        MPI_Comm World = commGridPtr->GetWorld();
        IT nglen = it->TotalLength();       // new global length
        IT cumloclen = it->MyLocLength();   // existing cumulative local lengths
        ++it;
        for(; it != vecs.end(); ++it)
        {
            if(*(commGridPtr) != *(it->getcommgrid()))
            {
                // BUGFIX: the diagnostic used to (incorrectly) mention EWiseApply
                SpParHelper::Print("Grids are not comparable for FullyDistVec<IT,NT> Concatenate\n");
                MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
            }
            nglen += it->TotalLength();
            cumloclen += it->MyLocLength();
        }
        FullyDistVec<IT,NT> ConCat (commGridPtr, nglen, NT());
        int nprocs = commGridPtr->GetSize();
        // Bucket each locally held entry by the process that owns its
        // destination slot in the concatenated vector.
        std::vector< std::vector< NT > > data(nprocs);
        std::vector< std::vector< IT > > inds(nprocs);
        IT gloffset = 0;    // global offset of the current input vector within the output
        for(it = vecs.begin(); it != vecs.end(); ++it)
        {
            IT loclen = it->LocArrSize();
            IT loffset = it->LengthUntil();     // loop-invariant: hoisted out of the i-loop
            for(IT i=0; i < loclen; ++i)
            {
                IT locind;
                int owner = ConCat.Owner(gloffset+loffset+i, locind);
                data[owner].push_back(it->arr[i]);
                inds[owner].push_back(locind);
            }
            gloffset += it->TotalLength();
        }
        // Exchange counts, then values and destination indices, via all-to-all.
        int * sendcnt = new int[nprocs];
        int * sdispls = new int[nprocs];
        for(int i=0; i<nprocs; ++i)
            sendcnt[i] = (int) data[i].size();
        int * rdispls = new int[nprocs];
        int * recvcnt = new int[nprocs];
        MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World);  // share the request counts
        sdispls[0] = 0;
        rdispls[0] = 0;
        for(int i=0; i<nprocs-1; ++i)
        {
            sdispls[i+1] = sdispls[i] + sendcnt[i];
            rdispls[i+1] = rdispls[i] + recvcnt[i];
        }
        IT totrecv = std::accumulate(recvcnt,recvcnt+nprocs,static_cast<IT>(0));
        NT * senddatabuf = new NT[cumloclen];
        for(int i=0; i<nprocs; ++i)
        {
            std::copy(data[i].begin(), data[i].end(), senddatabuf+sdispls[i]);
            std::vector<NT>().swap(data[i]);    // free data vectors eagerly
        }
        NT * recvdatabuf = new NT[totrecv];
        MPI_Alltoallv(senddatabuf, sendcnt, sdispls, MPIType<NT>(), recvdatabuf, recvcnt, rdispls, MPIType<NT>(), World);  // send data
        delete [] senddatabuf;
        IT * sendindsbuf = new IT[cumloclen];
        for(int i=0; i<nprocs; ++i)
        {
            std::copy(inds[i].begin(), inds[i].end(), sendindsbuf+sdispls[i]);
            std::vector<IT>().swap(inds[i]);    // free inds vectors eagerly
        }
        IT * recvindsbuf = new IT[totrecv];
        MPI_Alltoallv(sendindsbuf, sendcnt, sdispls, MPIType<IT>(), recvindsbuf, recvcnt, rdispls, MPIType<IT>(), World);  // send new inds
        DeleteAll(sendindsbuf, sendcnt, sdispls);
        // Scatter the received values into the local portion of the output.
        for(int i=0; i<nprocs; ++i)
        {
            for(int j = rdispls[i]; j < rdispls[i] + recvcnt[i]; ++j)
            {
                ConCat.arr[recvindsbuf[j]] = recvdatabuf[j];
            }
        }
        DeleteAll(recvindsbuf, recvcnt, rdispls);
        return ConCat;
    }
}
// Sanity checks before a distributed SpGEMM: the inner dimensions must agree
// and the two operands must be distinct objects (the SUMMA kernels mutate
// their inputs in flight, so aliasing is unsupported).
// On violation the whole MPI job is aborted; the boolean return value only
// matters for builds where MPI_Abort does not terminate immediately.
template <typename MATRIXA, typename MATRIXB>
bool CheckSpGEMMCompliance(const MATRIXA & A, const MATRIXB & B)
{
    const bool dims_match = (A.getncol() == B.getnrow());
    if(!dims_match)
    {
        std::ostringstream outs;
        outs << "Can not multiply, dimensions does not match"<< std::endl;
        outs << A.getncol() << " != " << B.getnrow() << std::endl;
        SpParHelper::Print(outs.str());
        MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
        return false;
    }
    const bool aliased = (static_cast<const void*>(&A) == static_cast<const void*>(&B));
    if(aliased)
    {
        std::ostringstream outs;
        outs << "Can not multiply, inputs alias (make a temporary copy of one of them first)"<< std::endl;
        SpParHelper::Print(outs.str());
        MPI_Abort(MPI_COMM_WORLD, MATRIXALIAS);
        return false;
    }
    return true;
}
// Combined logic for prune, recovery, and select
// Combined logic for prune, recovery, and select (the MCL pipeline step).
// Per column of A: entries <= hardThreshold are pruned; columns left with
// fewer than recoverNum entries AND a sum below recoverPct are "recovered"
// (their threshold is relaxed via Kselect); columns still denser than
// selectNum are cut down to their selectNum largest entries; after selection,
// recovery is attempted once more. A is pruned in place at the end.
// pruneCols accumulates the per-column threshold used by the final prune.
template <typename IT, typename NT, typename DER>
void MCLPruneRecoverySelect(SpParMat<IT,NT,DER> & A, NT hardThreshold, IT selectNum, IT recoverNum, NT recoverPct, int kselectVersion)
{
#ifdef TIMING
    double t0, t1;
#endif
    // Prune and create a new pruned matrix
    SpParMat<IT,NT,DER> PrunedA = A.Prune(std::bind2nd(std::less_equal<NT>(), hardThreshold), false);
    // column-wise statistics of the pruned matrix
    FullyDistVec<IT,NT> colSums = PrunedA.Reduce(Column, std::plus<NT>(), 0.0);
    FullyDistVec<IT,NT> nnzPerColumn = PrunedA.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;});
    FullyDistVec<IT,NT> pruneCols(A.getcommgrid(), A.getncol(), hardThreshold);
    PrunedA.FreeMemory();
    // Check if we need recovery
    // columns with nnz < recoverNum (r)
    FullyDistSpVec<IT,NT> recoverCols(nnzPerColumn, std::bind2nd(std::less<NT>(), recoverNum));
    recoverCols = recoverPct;
    // columns with nnz < r AND sum < recoverPct (pct)
    recoverCols = EWiseApply<NT>(recoverCols, colSums,
                                 [](NT spval, NT dval){return spval;},
                                 [](NT spval, NT dval){return dval < spval;},
                                 false, NT());
    IT nrecover = recoverCols.getnnz();
    if(nrecover > 0)
    {
#ifdef TIMING
        t0=MPI_Wtime();
#endif
        // Per-column kth largest value becomes the relaxed threshold
        A.Kselect(recoverCols, recoverNum, kselectVersion);
#ifdef TIMING
        t1=MPI_Wtime();
        mcl_kselecttime += (t1-t0);
#endif
        pruneCols.Set(recoverCols);
#ifdef COMBBLAS_DEBUG
        std::ostringstream outs;
        outs << "Number of columns needing recovery: " << nrecover << std::endl;
        SpParHelper::Print(outs.str());
#endif
    }
    if(selectNum>0)
    {
        // remaining columns (those NOT being recovered) will be up for selection
        FullyDistSpVec<IT,NT> selectCols = EWiseApply<NT>(recoverCols, colSums,
                                                          [](NT spval, NT dval){return spval;},
                                                          [](NT spval, NT dval){return spval==-1;},
                                                          true, static_cast<NT>(-1));
        selectCols = selectNum;
        selectCols = EWiseApply<NT>(selectCols, nnzPerColumn,
                                    [](NT spval, NT dval){return spval;},
                                    [](NT spval, NT dval){return dval > spval;},
                                    false, NT());
        IT nselect = selectCols.getnnz();
        if(nselect > 0 )
        {
#ifdef TIMING
            t0=MPI_Wtime();
#endif
            A.Kselect(selectCols, selectNum, kselectVersion); // PrunedA would also work
#ifdef TIMING
            t1=MPI_Wtime();
            mcl_kselecttime += (t1-t0);
#endif
            pruneCols.Set(selectCols);
#ifdef COMBBLAS_DEBUG
            std::ostringstream outs;
            outs << "Number of columns needing selection: " << nselect << std::endl;
            SpParHelper::Print(outs.str());
#endif
#ifdef TIMING
            t0=MPI_Wtime();
#endif
            SpParMat<IT,NT,DER> selectedA = A.PruneColumn(pruneCols, std::less<NT>(), false);
#ifdef TIMING
            t1=MPI_Wtime();
            mcl_prunecolumntime += (t1-t0);
#endif
            if(recoverNum>0 ) // recovery can be attempted after selection
            {
                FullyDistVec<IT,NT> nnzPerColumn1 = selectedA.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;});
                FullyDistVec<IT,NT> colSums1 = selectedA.Reduce(Column, std::plus<NT>(), 0.0);
                selectedA.FreeMemory();
                // selected columns with nnz < recoverNum (r)
                selectCols = recoverNum;
                selectCols = EWiseApply<NT>(selectCols, nnzPerColumn1,
                                            [](NT spval, NT dval){return spval;},
                                            [](NT spval, NT dval){return dval < spval;},
                                            false, NT());
                // selected columns with sum < recoverPct (pct)
                selectCols = recoverPct;
                selectCols = EWiseApply<NT>(selectCols, colSums1,
                                            [](NT spval, NT dval){return spval;},
                                            [](NT spval, NT dval){return dval < spval;},
                                            false, NT());
                IT n_recovery_after_select = selectCols.getnnz();
                if(n_recovery_after_select>0)
                {
                    // mclExpandVector2 does it on the original vector
                    // mclExpandVector1 does it one pruned vector
#ifdef TIMING
                    t0=MPI_Wtime();
#endif
                    A.Kselect(selectCols, recoverNum, kselectVersion); // Kselect on PrunedA might give different result
#ifdef TIMING
                    t1=MPI_Wtime();
                    mcl_kselecttime += (t1-t0);
#endif
                    pruneCols.Set(selectCols);
#ifdef COMBBLAS_DEBUG
                    std::ostringstream outs1;
                    // BUGFIX: this message used to print nselect (the pre-selection count)
                    outs1 << "Number of columns needing recovery after selection: " << n_recovery_after_select << std::endl;
                    SpParHelper::Print(outs1.str());
#endif
                }
            }
        }
    }
    // final prune: apply the accumulated per-column thresholds in place
#ifdef TIMING
    t0=MPI_Wtime();
#endif
    A.PruneColumn(pruneCols, std::less<NT>(), true);
#ifdef TIMING
    t1=MPI_Wtime();
    mcl_prunecolumntime += (t1-t0);
#endif
    // Add loops for empty columns
    if(recoverNum<=0 ) // if recoverNum>0, recovery would have added nonzeros in empty columns
    {
        FullyDistVec<IT,NT> nnzPerColumnA = A.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;});
        FullyDistSpVec<IT,NT> emptyColumns(nnzPerColumnA, std::bind2nd(std::equal_to<NT>(), 0.0));
        emptyColumns = 1.00;
        //Ariful: We need a selective AddLoops function with a sparse vector
        //A.AddLoops(emptyColumns);
    }
}
/**
* Broadcasts A multiple times (#phases) in order to save storage in the output
* Only uses 1/phases of C memory if the threshold/max limits are proper
*/
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU,NUO,UDERO> MemEfficientSpGEMM (SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B,
                                           int phases, NUO hardThreshold, IU selectNum, IU recoverNum, NUO recoverPct, int kselectVersion, int64_t perProcessMemory)
{
    // Multi-phase SUMMA SpGEMM: B is column-split into `phases` pieces and the
    // product is computed one piece at a time so only ~1/phases of C is live.
    // Each phase's result is pruned/recovered/selected (MCL semantics) before
    // the pieces are concatenated. If perProcessMemory > 0 (GB), `phases` is
    // re-estimated from the memory model below.
    typedef typename UDERA::LocalIT LIA;
    typedef typename UDERB::LocalIT LIB;
    typedef typename UDERO::LocalIT LIC;
    int myrank;
    MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
    if(A.getncol() != B.getnrow())
    {
        std::ostringstream outs;
        outs << "Can not multiply, dimensions does not match"<< std::endl;
        outs << A.getncol() << " != " << B.getnrow() << std::endl;
        SpParHelper::Print(outs.str());
        MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
        return SpParMat< IU,NUO,UDERO >();
    }
    if(phases <1 || phases >= A.getncol())
    {
        SpParHelper::Print("MemEfficientSpGEMM: The value of phases is too small or large. Resetting to 1.\n");
        phases = 1;
    }
    int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication
    std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);
    if(perProcessMemory>0) // estimate the number of phases permitted by memory
    {
        int p;
        MPI_Comm World = GridC->GetWorld();
        MPI_Comm_size(World,&p);
        int64_t perNNZMem_in = sizeof(IU)*2 + sizeof(NU1);
        int64_t perNNZMem_out = sizeof(IU)*2 + sizeof(NUO);
        // max nnz(A) in a process
        int64_t lannz = A.getlocalnnz();
        int64_t gannz;
        MPI_Allreduce(&lannz, &gannz, 1, MPIType<int64_t>(), MPI_MAX, World);
        int64_t inputMem = gannz * perNNZMem_in * 4; // for four copies (two for SUMMA)
        // max nnz(A^2) stored by summa in a process
        int64_t asquareNNZ = EstPerProcessNnzSUMMA(A,B);
        int64_t asquareMem = asquareNNZ * perNNZMem_out * 2; // an extra copy in multiway merge and in selection/recovery step
        // estimate kselect memory
        int64_t d = ceil( (asquareNNZ * sqrt(p))/ B.getlocalcols() ); // average nnz per column in A^2 (it is an overestimate because asquareNNZ is estimated based on unmerged matrices)
        // this is equivalent to (asquareNNZ * p) / B.getcol()
        int64_t k = std::min(int64_t(std::max(selectNum, recoverNum)), d );
        int64_t kselectmem = B.getlocalcols() * k * 8 * 3;
        // estimate output memory
        int64_t outputNNZ = (B.getlocalcols() * k)/sqrt(p);
        int64_t outputMem = outputNNZ * perNNZMem_in * 2;
        //inputMem + outputMem + asquareMem/phases + kselectmem/phases < memory
        int64_t remainingMem = perProcessMemory*1000000000 - inputMem - outputMem;
        if(remainingMem > 0)
        {
            phases = 1 + (asquareMem+kselectmem) / remainingMem;
        }
        if(myrank==0)
        {
            if(remainingMem < 0)
            {
                std::cout << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n Warning: input and output memory requirement is greater than per-process avaiable memory. Keeping phase to the value supplied at the command line. The program may go out of memory and crash! \n !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" << std::endl;
            }
#ifdef SHOW_MEMORY_USAGE
            int64_t maxMemory = kselectmem/phases + inputMem + outputMem + asquareMem / phases;
            if(maxMemory>1000000000)
                std::cout << "phases: " << phases << ": per process memory: " << perProcessMemory << " GB asquareMem: " << asquareMem/1000000000.00 << " GB" << " inputMem: " << inputMem/1000000000.00 << " GB" << " outputMem: " << outputMem/1000000000.00 << " GB" << " kselectmem: " << kselectmem/1000000000.00 << " GB" << std::endl;
            else
                std::cout << "phases: " << phases << ": per process memory: " << perProcessMemory << " GB asquareMem: " << asquareMem/1000000.00 << " MB" << " inputMem: " << inputMem/1000000.00 << " MB" << " outputMem: " << outputMem/1000000.00 << " MB" << " kselectmem: " << kselectmem/1000000.00 << " MB" << std::endl;
#endif
        }
    }
    LIA C_m = A.spSeq->getnrow();
    LIB C_n = B.spSeq->getncol();
    std::vector< UDERB > PiecesOfB;
    UDERB CopyB = *(B.spSeq); // we allow alias matrices as input because of this local copy
    CopyB.ColSplit(phases, PiecesOfB); // CopyB's memory is destroyed at this point
    MPI_Barrier(GridC->GetWorld());
    LIA ** ARecvSizes = SpHelper::allocate2D<LIA>(UDERA::esscount, stages);
    LIB ** BRecvSizes = SpHelper::allocate2D<LIB>(UDERB::esscount, stages);
    static_assert(std::is_same<LIA, LIB>::value, "local index types for both input matrices should be the same");
    static_assert(std::is_same<LIA, LIC>::value, "local index types for input and output matrices should be the same");
    SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld());
    // Remotely fetched matrices are stored as pointers
    UDERA * ARecv;
    UDERB * BRecv;
    std::vector< UDERO > toconcatenate;
    int Aself = (A.commGrid)->GetRankInProcRow();
    int Bself = (B.commGrid)->GetRankInProcCol();
    for(int p = 0; p< phases; ++p)
    {
        SpParHelper::GetSetSizes( PiecesOfB[p], BRecvSizes, (B.commGrid)->GetColWorld());
        std::vector< SpTuples<LIC,NUO> *> tomerge;
        for(int i = 0; i < stages; ++i)
        {
            std::vector<LIA> ess;
            if(i == Aself) ARecv = A.spSeq; // shallow-copy
            else
            {
                ess.resize(UDERA::esscount);
                for(int j=0; j< UDERA::esscount; ++j)
                    ess[j] = ARecvSizes[j][i];  // essentials of the ith matrix in this row
                ARecv = new UDERA();            // first, create the object
            }
#ifdef TIMING
            double t0=MPI_Wtime();
#endif
            SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i);  // then, receive its elements
#ifdef TIMING
            double t1=MPI_Wtime();
            mcl_Abcasttime += (t1-t0);
#endif
            ess.clear();
            if(i == Bself) BRecv = &(PiecesOfB[p]); // shallow-copy
            else
            {
                ess.resize(UDERB::esscount);
                for(int j=0; j< UDERB::esscount; ++j)
                    ess[j] = BRecvSizes[j][i];
                BRecv = new UDERB();
            }
#ifdef TIMING
            double t2=MPI_Wtime();
#endif
            SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i);  // then, receive its elements
#ifdef TIMING
            double t3=MPI_Wtime();
            mcl_Bbcasttime += (t3-t2);
#endif
#ifdef TIMING
            double t4=MPI_Wtime();
#endif
            //SpTuples<LIC,NUO> * C_cont = LocalSpGEMM<SR, NUO>(*ARecv, *BRecv,i != Aself, i != Bself);
            // the last two flags ask the kernel to delete non-local operands
            SpTuples<LIC,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO>(*ARecv, *BRecv,i != Aself, i != Bself);
#ifdef TIMING
            double t5=MPI_Wtime();
            mcl_localspgemmtime += (t5-t4);
#endif
            if(!C_cont->isZero())
                tomerge.push_back(C_cont);
            else
                delete C_cont;
        }   // all stages executed
#ifdef SHOW_MEMORY_USAGE
        int64_t gcnnz_unmerged, lcnnz_unmerged = 0;
        for(size_t i = 0; i < tomerge.size(); ++i)
        {
            lcnnz_unmerged += tomerge[i]->getnnz();
        }
        MPI_Allreduce(&lcnnz_unmerged, &gcnnz_unmerged, 1, MPIType<int64_t>(), MPI_MAX, MPI_COMM_WORLD);
        int64_t summa_memory = gcnnz_unmerged*20;//(gannz*2 + phase_nnz + gcnnz_unmerged + gannz + gannz/phases) * 20; // last two for broadcasts
        if(myrank==0)
        {
            if(summa_memory>1000000000)
                std::cout << p+1 << ". unmerged: " << summa_memory/1000000000.00 << "GB " ;
            else
                std::cout << p+1 << ". unmerged: " << summa_memory/1000000.00 << " MB " ;
        }
#endif
#ifdef TIMING
        double t6=MPI_Wtime();
#endif
        //UDERO OnePieceOfC(MergeAll<SR>(tomerge, C_m, PiecesOfB[p].getncol(),true), false);
        // TODO: MultiwayMerge can directly return UDERO inorder to avoid the extra copy
        SpTuples<LIC,NUO> * OnePieceOfC_tuples = MultiwayMerge<SR>(tomerge, C_m, PiecesOfB[p].getncol(),true);
#ifdef SHOW_MEMORY_USAGE
        int64_t gcnnz_merged, lcnnz_merged ;
        lcnnz_merged = OnePieceOfC_tuples->getnnz();
        MPI_Allreduce(&lcnnz_merged, &gcnnz_merged, 1, MPIType<int64_t>(), MPI_MAX, MPI_COMM_WORLD);
        // TODO: we can remove gcnnz_merged memory here because we don't need to concatenate anymore
        int64_t merge_memory = gcnnz_merged*2*20;//(gannz*2 + phase_nnz + gcnnz_unmerged + gcnnz_merged*2) * 20;
        if(myrank==0)
        {
            if(merge_memory>1000000000)
                std::cout << " merged: " << merge_memory/1000000000.00 << "GB " ;
            else
                std::cout << " merged: " << merge_memory/1000000.00 << " MB " ;
        }
#endif
#ifdef TIMING
        double t7=MPI_Wtime();
        mcl_multiwaymergetime += (t7-t6);
#endif
        UDERO * OnePieceOfC = new UDERO(* OnePieceOfC_tuples, false);
        delete OnePieceOfC_tuples;
        SpParMat<IU,NUO,UDERO> OnePieceOfC_mat(OnePieceOfC, GridC);
        MCLPruneRecoverySelect(OnePieceOfC_mat, hardThreshold, selectNum, recoverNum, recoverPct, kselectVersion);
#ifdef SHOW_MEMORY_USAGE
        int64_t gcnnz_pruned, lcnnz_pruned ;
        lcnnz_pruned = OnePieceOfC_mat.getlocalnnz();
        MPI_Allreduce(&lcnnz_pruned, &gcnnz_pruned, 1, MPIType<int64_t>(), MPI_MAX, MPI_COMM_WORLD);
        // TODO: we can remove gcnnz_merged memory here because we don't need to concatenate anymore
        int64_t prune_memory = gcnnz_pruned*2*20;//(gannz*2 + phase_nnz + gcnnz_pruned*2) * 20 + kselectmem; // 3 extra copies of OnePieceOfC_mat, we can make it one extra copy!
        //phase_nnz += gcnnz_pruned;
        if(myrank==0)
        {
            if(prune_memory>1000000000)
                std::cout << "Prune: " << prune_memory/1000000000.00 << "GB " << std::endl ;
            else
                std::cout << "Prune: " << prune_memory/1000000.00 << " MB " << std::endl ;
        }
#endif
        // ABAB: Change this to accept pointers to objects
        toconcatenate.push_back(OnePieceOfC_mat.seq());
    }
    UDERO * C = new UDERO(0,C_m, C_n,0);
    C->ColConcatenate(toconcatenate); // ABAB: Change this to accept a vector of pointers to pointers to DER objects
    SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
    // BUGFIX: BRecvSizes was allocated with UDERB::esscount rows but freed with
    // UDERA::esscount; the counts must match or rows leak / are over-freed.
    SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);
    return SpParMat<IU,NUO,UDERO> (C, GridC);
}
/**
* Parallel C = A*B routine that uses a double buffered broadcasting scheme
* @pre { Input matrices, A and B, should not alias }
* Most memory efficient version available. Total stages: 2*sqrt(p)
* Memory requirement during first sqrt(p) stages: <= (3/2)*(nnz(A)+nnz(B))+(1/2)*nnz(C)
* Memory requirement during second sqrt(p) stages: <= nnz(A)+nnz(B)+nnz(C)
* Final memory requirement: nnz(C) if clearA and clearB are true
**/
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU,NUO,UDERO> Mult_AnXBn_DoubleBuff
		(SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false )
{
	// Double-buffered SUMMA: each local piece of A and B is split in half and
	// SUMMA is run twice (2*sqrt(p) stages total), so only half of each input
	// is in flight per round. clearA/clearB free the inputs to reduce peak
	// memory; otherwise the halves are merged back at the end.
	if(!CheckSpGEMMCompliance(A,B) )
	{
		return SpParMat< IU,NUO,UDERO >();
	}
	typedef typename UDERA::LocalIT LIA;
	typedef typename UDERB::LocalIT LIB;
	typedef typename UDERO::LocalIT LIC;
	static_assert(std::is_same<LIA, LIB>::value, "local index types for both input matrices should be the same");
	static_assert(std::is_same<LIA, LIC>::value, "local index types for input and output matrices should be the same");
	int stages, dummy; 	// last two parameters of ProductGrid are ignored for Synch multiplication
	std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);
	LIA C_m = A.spSeq->getnrow();
	LIB C_n = B.spSeq->getncol();
	// Split local pieces in two; Split() empties the source object.
	UDERA * A1seq = new UDERA();
	UDERA * A2seq = new UDERA();
	UDERB * B1seq = new UDERB();
	UDERB * B2seq = new UDERB();
	(A.spSeq)->Split( *A1seq, *A2seq);
	// B is transposed only so Split partitions it the right way;
	// the halves are transposed back before the column-by-column kernel runs.
	const_cast< UDERB* >(B.spSeq)->Transpose();
	(B.spSeq)->Split( *B1seq, *B2seq);
	// Transpose back for the column-by-column algorithm
	const_cast< UDERB* >(B1seq)->Transpose();
	const_cast< UDERB* >(B2seq)->Transpose();
	LIA ** ARecvSizes = SpHelper::allocate2D<LIA>(UDERA::esscount, stages);
	LIB ** BRecvSizes = SpHelper::allocate2D<LIB>(UDERB::esscount, stages);
	SpParHelper::GetSetSizes( *A1seq, ARecvSizes, (A.commGrid)->GetRowWorld());
	SpParHelper::GetSetSizes( *B1seq, BRecvSizes, (B.commGrid)->GetColWorld());
	// Remotely fetched matrices are stored as pointers
	UDERA * ARecv;
	UDERB * BRecv;
	std::vector< SpTuples<LIC,NUO> *> tomerge;
	int Aself = (A.commGrid)->GetRankInProcRow();
	int Bself = (B.commGrid)->GetRankInProcCol();
	// ---- first round: SUMMA over the first halves (A1seq x B1seq) ----
	for(int i = 0; i < stages; ++i)
	{
		std::vector<LIA> ess;
		if(i == Aself)
		{
			ARecv = A1seq;	// shallow-copy
		}
		else
		{
			ess.resize(UDERA::esscount);
			for(int j=0; j< UDERA::esscount; ++j)
			{
				ess[j] = ARecvSizes[j][i];		// essentials of the ith matrix in this row
			}
			ARecv = new UDERA();				// first, create the object
		}
		SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i);	// then, receive its elements
		ess.clear();
		if(i == Bself)
		{
			BRecv = B1seq;	// shallow-copy
		}
		else
		{
			ess.resize(UDERB::esscount);
			for(int j=0; j< UDERB::esscount; ++j)
			{
				ess[j] = BRecvSizes[j][i];
			}
			BRecv = new UDERB();
		}
		SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i);	// then, receive its elements
		// before activating this remove transposing B1seq
		/*
		SpTuples<LIC,NUO> * C_cont = MultiplyReturnTuples<SR, NUO>
						(*ARecv, *BRecv, // parameters themselves
						false, true,	// transpose information (B is transposed)
						i != Aself, 	// 'delete A' condition
						i != Bself);	// 'delete B' condition
		*/
		// the last two flags make the kernel free remotely-received operands
		SpTuples<LIC,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO>
						(*ARecv, *BRecv, // parameters themselves
						i != Aself, 	// 'delete A' condition
						i != Bself);	// 'delete B' condition
		if(!C_cont->isZero())
			tomerge.push_back(C_cont);
		else
			delete C_cont;
	}
	// first halves are no longer needed when the inputs are being consumed
	if(clearA) delete A1seq;
	if(clearB) delete B1seq;
	// Set the new dimensions
	SpParHelper::GetSetSizes( *A2seq, ARecvSizes, (A.commGrid)->GetRowWorld());
	SpParHelper::GetSetSizes( *B2seq, BRecvSizes, (B.commGrid)->GetColWorld());
	// Start the second round (A2seq x B2seq); results accumulate into tomerge
	for(int i = 0; i < stages; ++i)
	{
		std::vector<LIA> ess;
		if(i == Aself)
		{
			ARecv = A2seq;	// shallow-copy
		}
		else
		{
			ess.resize(UDERA::esscount);
			for(int j=0; j< UDERA::esscount; ++j)
			{
				ess[j] = ARecvSizes[j][i];		// essentials of the ith matrix in this row
			}
			ARecv = new UDERA();				// first, create the object
		}
		SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i);	// then, receive its elements
		ess.clear();
		if(i == Bself)
		{
			BRecv = B2seq;	// shallow-copy
		}
		else
		{
			ess.resize(UDERB::esscount);
			for(int j=0; j< UDERB::esscount; ++j)
			{
				ess[j] = BRecvSizes[j][i];
			}
			BRecv = new UDERB();
		}
		SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i);	// then, receive its elements
		// before activating this remove transposing B2seq
		/*
		SpTuples<LIC,NUO> * C_cont = MultiplyReturnTuples<SR, NUO>
						(*ARecv, *BRecv, // parameters themselves
						false, true,	// transpose information (B is transposed)
						i != Aself, 	// 'delete A' condition
						i != Bself);	// 'delete B' condition
		*/
		SpTuples<LIC,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO>
						(*ARecv, *BRecv, // parameters themselves
						i != Aself, 	// 'delete A' condition
						i != Bself);	// 'delete B' condition
		if(!C_cont->isZero())
			tomerge.push_back(C_cont);
		else
			delete C_cont;
	}
	SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
	SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);
	// Either destroy the inputs (clearX) or stitch the halves back together.
	if(clearA)
	{
		delete A2seq;
		delete A.spSeq;
		A.spSeq = NULL;
	}
	else
	{
		(A.spSeq)->Merge(*A1seq, *A2seq);
		delete A1seq;
		delete A2seq;
	}
	if(clearB)
	{
		delete B2seq;
		delete B.spSeq;
		B.spSeq = NULL;
	}
	else
	{
		// undo the per-half transposes before merging, then restore B's layout
		B1seq->Transpose();
		B2seq->Transpose();
		(B.spSeq)->Merge(*B1seq, *B2seq);
		delete B1seq;
		delete B2seq;
		const_cast< UDERB* >(B.spSeq)->Transpose();	// transpose back to original
	}
	UDERO * C = new UDERO(MergeAll<SR>(tomerge, C_m, C_n,true), false);
	return SpParMat<IU,NUO,UDERO> (C, GridC);	// return the result object
}
/**
* Parallel A = B*C routine that uses only MPI-1 features
* Relies on simple blocking broadcast
* @pre { Input matrices, A and B, should not alias }
**/
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU, NUO, UDERO> Mult_AnXBn_Synch
		(SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false )
{
	// Single-round synchronous SUMMA: for each of sqrt(p) stages, the owning
	// process row/column broadcasts its piece of A/B, every process multiplies
	// the received pieces locally, and the per-stage results are merged at the
	// end. clearA/clearB destroy the inputs after use to reduce peak memory.
	if(!CheckSpGEMMCompliance(A,B) )
	{
		return SpParMat< IU,NUO,UDERO >();
	}
	int stages, dummy; 	// last two parameters of ProductGrid are ignored for Synch multiplication
	std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);
	IU C_m = A.spSeq->getnrow();
	IU C_n = B.spSeq->getncol();
	//const_cast< UDERB* >(B.spSeq)->Transpose(); // do not transpose for colum-by-column multiplication
	// per-stage "essential" sizes needed to receive each broadcast matrix
	IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages);
	IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages);
	SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld());
	SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld());
	// Remotely fetched matrices are stored as pointers
	UDERA * ARecv;
	UDERB * BRecv;
	std::vector< SpTuples<IU,NUO> *> tomerge;
	int Aself = (A.commGrid)->GetRankInProcRow();
	int Bself = (B.commGrid)->GetRankInProcCol();
	for(int i = 0; i < stages; ++i)
	{
		std::vector<IU> ess;
		if(i == Aself)
		{
			ARecv = A.spSeq;	// shallow-copy
		}
		else
		{
			ess.resize(UDERA::esscount);
			for(int j=0; j< UDERA::esscount; ++j)
			{
				ess[j] = ARecvSizes[j][i];		// essentials of the ith matrix in this row
			}
			ARecv = new UDERA();				// first, create the object
		}
		SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i);	// then, receive its elements
		ess.clear();
		if(i == Bself)
		{
			BRecv = B.spSeq;	// shallow-copy
		}
		else
		{
			ess.resize(UDERB::esscount);
			for(int j=0; j< UDERB::esscount; ++j)
			{
				ess[j] = BRecvSizes[j][i];
			}
			BRecv = new UDERB();
		}
		SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i);	// then, receive its elements
		// before activating this transpose B first
		/*SpTuples<IU,NUO> * C_cont = MultiplyReturnTuples<SR, NUO>
						(*ARecv, *BRecv, // parameters themselves
						false, true,	// transpose information (B is transposed)
						i != Aself, 	// 'delete A' condition
						i != Bself);	// 'delete B' condition
		*/
		/*
		SpTuples<IU,NUO> * C_cont = LocalSpGEMM<SR, NUO>
						(*ARecv, *BRecv, // parameters themselves
						i != Aself, 	// 'delete A' condition
						i != Bself);	// 'delete B' condition
		*/
		// last two flags free remotely-received operands inside the kernel
		SpTuples<IU,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO>
						(*ARecv, *BRecv, // parameters themselves
						i != Aself, 	// 'delete A' condition
						i != Bself);	// 'delete B' condition
		if(!C_cont->isZero())
			tomerge.push_back(C_cont);
#ifdef COMBBLAS_DEBUG
		std::ostringstream outs;
		outs << i << "th SUMMA iteration"<< std::endl;
		SpParHelper::Print(outs.str());
#endif
	}
	if(clearA && A.spSeq != NULL)
	{
		delete A.spSeq;
		A.spSeq = NULL;
	}
	if(clearB && B.spSeq != NULL)
	{
		delete B.spSeq;
		B.spSeq = NULL;
	}
	SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
	SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);
	//UDERO * C = new UDERO(MergeAll<SR>(tomerge, C_m, C_n,true), false);
	// First get the result in SpTuples, then convert to UDER
	// the last parameter to MergeAll deletes tomerge arrays
	SpTuples<IU,NUO> * C_tuples = MultiwayMerge<SR>(tomerge, C_m, C_n,true);
	UDERO * C = new UDERO(*C_tuples, false);
	delete C_tuples;
	//if(!clearB)
	//	const_cast< UDERB* >(B.spSeq)->Transpose();	// transpose back to original
	return SpParMat<IU,NUO,UDERO> (C, GridC);	// return the result object
}
/**
* Estimate the maximum nnz needed to store in a process from all stages of SUMMA before reduction
* @pre { Input matrices, A and B, should not alias }
**/
template <typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
int64_t EstPerProcessNnzSUMMA(SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B)
{
	// Estimates the maximum (over processes) number of nonzeros any single
	// process would hold across all SUMMA stages of A*B, BEFORE reduction/
	// merging. Runs the same broadcast schedule as the real SUMMA but calls
	// a hash-based nnz estimator instead of multiplying. Used by
	// MemEfficientSpGEMM's memory model. Result is an upper estimate since
	// stage outputs are counted unmerged.
	typedef typename UDERA::LocalIT LIA;
	typedef typename UDERB::LocalIT LIB;
	static_assert(std::is_same<LIA, LIB>::value, "local index types for both input matrices should be the same");
	int64_t nnzC_SUMMA = 0;
	if(A.getncol() != B.getnrow())
	{
		std::ostringstream outs;
		outs << "Can not multiply, dimensions does not match"<< std::endl;
		outs << A.getncol() << " != " << B.getnrow() << std::endl;
		SpParHelper::Print(outs.str());
		MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
		return nnzC_SUMMA;
	}
	int stages, dummy; 	// last two parameters of ProductGrid are ignored for Synch multiplication
	std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);
	MPI_Barrier(GridC->GetWorld());
	LIA ** ARecvSizes = SpHelper::allocate2D<LIA>(UDERA::esscount, stages);
	LIB ** BRecvSizes = SpHelper::allocate2D<LIB>(UDERB::esscount, stages);
	SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld());
	SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld());
	// Remotely fetched matrices are stored as pointers
	UDERA * ARecv;
	UDERB * BRecv;
	int Aself = (A.commGrid)->GetRankInProcRow();
	int Bself = (B.commGrid)->GetRankInProcCol();
	for(int i = 0; i < stages; ++i)
	{
		std::vector<LIA> ess;
		if(i == Aself)
		{
			ARecv = A.spSeq;	// shallow-copy
		}
		else
		{
			ess.resize(UDERA::esscount);
			for(int j=0; j< UDERA::esscount; ++j)
			{
				ess[j] = ARecvSizes[j][i];		// essentials of the ith matrix in this row
			}
			ARecv = new UDERA();				// first, create the object
		}
		SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i);	// then, receive its elements
		ess.clear();
		if(i == Bself)
		{
			BRecv = B.spSeq;	// shallow-copy
		}
		else
		{
			ess.resize(UDERB::esscount);
			for(int j=0; j< UDERB::esscount; ++j)
			{
				ess[j] = BRecvSizes[j][i];
			}
			BRecv = new UDERB();
		}
		SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i);	// then, receive its elements
		// no need to keep entries of colnnzC in larger precision
		// because colnnzC is of length nzc and estimates nnzs per column
		// @OGUZ-EDIT Using hash spgemm for estimation
		//LIB * colnnzC = estimateNNZ(*ARecv, *BRecv);
		LIB* flopC = estimateFLOP(*ARecv, *BRecv);
		LIB* colnnzC = estimateNNZ_Hash(*ARecv, *BRecv, flopC);
		if (flopC) delete [] flopC;
		LIB nzc = BRecv->GetDCSC()->nzc;
		// sum the per-column estimates for this stage
		int64_t nnzC_stage = 0;
#ifdef THREADED
#pragma omp parallel for reduction (+:nnzC_stage)
#endif
		for (LIB k=0; k<nzc; k++)
		{
			nnzC_stage = nnzC_stage + colnnzC[k];
		}
		nnzC_SUMMA += nnzC_stage;
		if(colnnzC) delete [] colnnzC;
		// sampling-based estimation (comment the estimation above, and
		// comment out below to use)
		// int64_t nnzC_stage = estimateNNZ_sampling(*ARecv, *BRecv);
		// nnzC_SUMMA += nnzC_stage;
		// delete received data (only the remotely-fetched copies)
		if(i != Aself)
			delete ARecv;
		if(i != Bself)
			delete BRecv;
	}
	SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
	SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);
	// take the max over all processes so the caller can budget for the worst one
	int64_t nnzC_SUMMA_max = 0;
	MPI_Allreduce(&nnzC_SUMMA, &nnzC_SUMMA_max, 1, MPIType<int64_t>(), MPI_MAX, GridC->GetWorld());
	return nnzC_SUMMA_max;
}
// Sanity checks before a distributed SpMV: the matrix column count must equal
// the vector length, and both operands must live on the same communication
// grid. Aborts the whole MPI job on violation; returns normally otherwise.
template <typename MATRIX, typename VECTOR>
void CheckSpMVCompliance(const MATRIX & A, const VECTOR & x)
{
    const bool lengths_match = (A.getncol() == x.TotalLength());
    if(!lengths_match)
    {
        std::ostringstream outs;
        outs << "Can not multiply, dimensions does not match"<< std::endl;
        outs << A.getncol() << " != " << x.TotalLength() << std::endl;
        SpParHelper::Print(outs.str());
        MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
    }
    const bool same_grid = ( *(A.getcommgrid()) == *(x.getcommgrid()) );
    if(!same_grid)
    {
        std::cout << "Grids are not comparable for SpMV" << std::endl;
        MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
    }
}
template <typename SR, typename IU, typename NUM, typename UDER>
FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV
(const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue, OptBuf<int32_t, typename promote_trait<NUM,IU>::T_promote > & optbuf);
// Convenience overload: runs the sparse SpMV without a caller-supplied
// communication buffer by forwarding a default-constructed OptBuf to the
// full four-argument overload declared above.
template <typename SR, typename IU, typename NUM, typename UDER>
FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote>  SpMV
	(const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue)
{
	typedef typename promote_trait<NUM,IU>::T_promote T_promote;
	// default OptBuf: no preallocated buffers, SpMV allocates as needed
	OptBuf<int32_t, T_promote > optbuf = OptBuf<int32_t, T_promote >();
	return SpMV<SR>(A, x, indexisvalue, optbuf);
}
/**
* Step 1 of the sparse SpMV algorithm
* @param[in,out] trxlocnz, lenuntil,trxinds,trxnums { set or allocated }
* @param[in] indexisvalue
**/
template<typename IU, typename NV>
void TransposeVector(MPI_Comm & World, const FullyDistSpVec<IU,NV> & x, int32_t & trxlocnz, IU & lenuntil, int32_t * & trxinds, NV * & trxnums, bool indexisvalue)
{
	// Step 1 of sparse SpMV: exchange this process's slice of x with its
	// "diagonal" partner so indices/values end up where the transposed
	// layout needs them. Outputs: trxlocnz (received nnz), lenuntil
	// (partner's global offset), trxinds (allocated here; received indices
	// shifted to matrix indexing), trxnums (allocated only when
	// !indexisvalue; received values). Caller owns the allocated arrays.
	int32_t xlocnz = (int32_t) x.getlocnnz();
	int32_t roffst = (int32_t) x.RowLenUntil();	// since trxinds is int32_t
	int32_t roffset;
	IU luntil = x.LengthUntil();
	int diagneigh = x.commGrid->GetComplementRank();
	MPI_Status status;
	// swap scalars (row offset, nnz, global offset) with the diagonal partner
	MPI_Sendrecv(&roffst, 1, MPIType<int32_t>(), diagneigh, TROST, &roffset, 1, MPIType<int32_t>(), diagneigh, TROST, World, &status);
	MPI_Sendrecv(&xlocnz, 1, MPIType<int32_t>(), diagneigh, TRNNZ, &trxlocnz, 1, MPIType<int32_t>(), diagneigh, TRNNZ, World, &status);
	MPI_Sendrecv(&luntil, 1, MPIType<IU>(), diagneigh, TRLUT, &lenuntil, 1, MPIType<IU>(), diagneigh, TRLUT, World, &status);
	// ABAB: Important observation is that local indices (given by x.ind) is 32-bit addressible
	// Copy them to 32 bit integers and transfer that to save 50% of off-node bandwidth
	trxinds = new int32_t[trxlocnz];
	int32_t * temp_xind = new int32_t[xlocnz];
#ifdef THREADED
#pragma omp parallel for
#endif
	for(int i=0; i< xlocnz; ++i)
		temp_xind[i] = (int32_t) x.ind[i];
	MPI_Sendrecv(temp_xind, xlocnz, MPIType<int32_t>(), diagneigh, TRI, trxinds, trxlocnz, MPIType<int32_t>(), diagneigh, TRI, World, &status);
	delete [] temp_xind;
	if(!indexisvalue)
	{
		// values travel only when they are not implied by the indices
		trxnums = new NV[trxlocnz];
		MPI_Sendrecv(const_cast<NV*>(SpHelper::p2a(x.num)), xlocnz, MPIType<NV>(), diagneigh, TRX, trxnums, trxlocnz, MPIType<NV>(), diagneigh, TRX, World, &status);
	}
	std::transform(trxinds, trxinds+trxlocnz, trxinds, std::bind2nd(std::plus<int32_t>(), roffset));	// fullydist indexing (p pieces) -> matrix indexing (sqrt(p) pieces)
}
/**
* Step 2 of the sparse SpMV algorithm
* @param[in,out] trxinds, trxnums { deallocated }
* @param[in,out] indacc, numacc { allocated }
* @param[in,out] accnz { set }
* @param[in] trxlocnz, lenuntil, indexisvalue
**/
template<typename IU, typename NV>
void AllGatherVector(MPI_Comm & World, int trxlocnz, IU lenuntil, int32_t * & trxinds, NV * & trxnums,
int32_t * & indacc, NV * & numacc, int & accnz, bool indexisvalue)
{
int colneighs, colrank;
MPI_Comm_size(ColWorld, &colneighs);
MPI_Comm_rank(ColWorld, &colrank);
// Gather each processor-column neighbor's local nonzero count
int * colnz = new int[colneighs];
colnz[colrank] = trxlocnz;
MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colnz, 1, MPI_INT, ColWorld);
int * dpls = new int[colneighs](); // displacements (zero initialized pid)
std::partial_sum(colnz, colnz+colneighs-1, dpls+1);
accnz = std::accumulate(colnz, colnz+colneighs, 0);
indacc = new int32_t[accnz];
numacc = new NV[accnz];
// ABAB: Future issues here, colnz is of type int (MPI limitation)
// What if the aggregate vector size along the processor row/column is not 32-bit addressable?
// This will happen when n/sqrt(p) > 2^31
// Currently we can solve a small problem (scale 32) with 4096 processor
// For a medium problem (scale 35), we'll need 32K processors which gives sqrt(p) ~ 180
// 2^35 / 180 ~ 2^29 / 3 which is not an issue !
#ifdef TIMING
double t0=MPI_Wtime();
#endif
// Concatenate all column neighbors' index chunks into indacc
MPI_Allgatherv(trxinds, trxlocnz, MPIType<int32_t>(), indacc, colnz, dpls, MPIType<int32_t>(), ColWorld);
delete [] trxinds;
if(indexisvalue)
{
// Values are implied by the (global) indices: reconstruct them by adding the
// column head's length-until prefix, broadcast from column rank 0.
IU lenuntilcol;
if(colrank == 0) lenuntilcol = lenuntil;
MPI_Bcast(&lenuntilcol, 1, MPIType<IU>(), 0, ColWorld);
for(int i=0; i< accnz; ++i) // fill numerical values from indices
{
numacc[i] = indacc[i] + lenuntilcol;
}
}
else
{
// Explicit values: gather them with the same counts/displacements as the indices
MPI_Allgatherv(trxnums, trxlocnz, MPIType<NV>(), numacc, colnz, dpls, MPIType<NV>(), ColWorld);
delete [] trxnums;
}
#ifdef TIMING
double t1=MPI_Wtime();
cblas_allgathertime += (t1-t0);
#endif
DeleteAll(colnz,dpls);
}
/**
* Step 3 of the sparse SpMV algorithm, with the semiring
* @param[in,out] optbuf {scratch space for all-to-all (fold) communication}
* @param[in,out] indacc, numacc {index and values of the input vector, deleted upon exit}
* @param[in,out] sendindbuf, sendnumbuf {index and values of the output vector, created}
**/
template<typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void LocalSpMV(const SpParMat<IU,NUM,UDER> & A, int rowneighs, OptBuf<int32_t, OVT > & optbuf, int32_t * & indacc, IVT * & numacc,
int32_t * & sendindbuf, OVT * & sendnumbuf, int * & sdispls, int * sendcnt, int accnz, bool indexisvalue, PreAllocatedSPA<OVT> & SPA)
{
if(optbuf.totmax > 0) // graph500 optimization enabled
{
if(A.spSeq->getnsplit() > 0)
{
// optbuf.{inds/nums/dspls} and sendcnt are all pre-allocated and only filled by dcsc_gespmv_threaded
generic_gespmv_threaded_setbuffers<SR> (*(A.spSeq), indacc, numacc, accnz, optbuf.inds, optbuf.nums, sendcnt, optbuf.dspls, rowneighs);
}
else
{
generic_gespmv<SR> (*(A.spSeq), indacc, numacc, accnz, optbuf.inds, optbuf.nums, sendcnt, optbuf.dspls, rowneighs, indexisvalue);
}
// input vector buffers are consumed here in the optbuf path
DeleteAll(indacc,numacc);
}
else
{
if(A.spSeq->getnsplit() > 0)
{
// sendindbuf/sendnumbuf/sdispls are all allocated and filled by dcsc_gespmv_threaded
int totalsent = generic_gespmv_threaded<SR> (*(A.spSeq), indacc, numacc, accnz, sendindbuf, sendnumbuf, sdispls, rowneighs, SPA);
DeleteAll(indacc, numacc);
// derive per-neighbor send counts from the displacement array
for(int i=0; i<rowneighs-1; ++i)
sendcnt[i] = sdispls[i+1] - sdispls[i];
sendcnt[rowneighs-1] = totalsent - sdispls[rowneighs-1];
}
else
{
// default SpMSpV
std::vector< int32_t > indy;
std::vector< OVT > numy;
generic_gespmv<SR>(*(A.spSeq), indacc, numacc, accnz, indy, numy, SPA);
DeleteAll(indacc, numacc);
int32_t bufsize = indy.size(); // as compact as possible
sendindbuf = new int32_t[bufsize];
sendnumbuf = new OVT[bufsize];
int32_t perproc = A.getlocalrows() / rowneighs;
int k = 0; // index to buffer
// bucket the (sorted) output nonzeros by destination row neighbor,
// rebasing each index to that neighbor's local range
for(int i=0; i<rowneighs; ++i)
{
int32_t end_this = (i==rowneighs-1) ? A.getlocalrows(): (i+1)*perproc;
while(k < bufsize && indy[k] < end_this)
{
sendindbuf[k] = indy[k] - i*perproc;
sendnumbuf[k] = numy[k];
++sendcnt[i];
++k;
}
}
sdispls = new int[rowneighs]();
std::partial_sum(sendcnt, sendcnt+rowneighs-1, sdispls+1);
//#endif
}
}
}
// non threaded k-way merge of nlists sorted (index, value) lists into a single
// sorted output, combining duplicate indices with the semiring's add.
template <typename SR, typename IU, typename OVT>
void MergeContributions(int* listSizes, std::vector<int32_t *> & indsvec, std::vector<OVT *> & numsvec, std::vector<IU>& mergedind, std::vector<OVT>& mergednum)
{
int nlists = indsvec.size();
// this condition is checked in the caller SpMV function.
// I am still putting it here for completeness
if(nlists == 1)
{
// simply copy data
int veclen = listSizes[0];
mergedind.resize(veclen);
mergednum.resize(veclen);
for(int i=0; i<veclen; i++)
{
mergedind[i] = indsvec[0][i];
mergednum[i] = numsvec[0][i];
}
return;
}
// hsize tracks how many lists still have unconsumed entries in the heap
int32_t hsize = 0;
int32_t inf = std::numeric_limits<int32_t>::min();
int32_t sup = std::numeric_limits<int32_t>::max();
KNHeap< int32_t, int32_t > sHeap(sup, inf);
int * processed = new int[nlists]();
// seed the heap with the head element of every non-empty list
for(int i=0; i<nlists; ++i)
{
if(listSizes[i] > 0)
{
// key, list_id
sHeap.insert(indsvec[i][0], i);
++hsize;
}
}
int32_t key, locv;
// pop the first element separately so the main loop can always compare
// against mergedind.back()
if(hsize > 0)
{
sHeap.deleteMin(&key, &locv);
mergedind.push_back( static_cast<IU>(key));
mergednum.push_back(numsvec[locv][0]); // nothing is processed yet
if( (++(processed[locv])) < listSizes[locv] )
sHeap.insert(indsvec[locv][processed[locv]], locv);
else
--hsize;
}
while(hsize > 0)
{
sHeap.deleteMin(&key, &locv);
if(mergedind.back() == static_cast<IU>(key))
{
// duplicate index: combine values with the semiring addition
mergednum.back() = SR::add(mergednum.back(), numsvec[locv][processed[locv]]);
// ABAB: Benchmark actually allows us to be non-deterministic in terms of parent selection
// We can just skip this addition operator (if it's a max/min select)
}
else
{
mergedind.push_back(static_cast<IU>(key));
mergednum.push_back(numsvec[locv][processed[locv]]);
}
// advance the source list; refill the heap or retire the list
if( (++(processed[locv])) < listSizes[locv] )
sHeap.insert(indsvec[locv][processed[locv]], locv);
else
--hsize;
}
DeleteAll(processed);
}
// Threaded wrapper around MergeContributions: partitions the index range
// [0, maxindex) into nsplits chunks, merges each chunk independently in
// parallel, then concatenates the per-chunk results.
template <typename SR, typename IU, typename OVT>
void MergeContributions_threaded(int * & listSizes, std::vector<int32_t *> & indsvec, std::vector<OVT *> & numsvec, std::vector<IU> & mergedind, std::vector<OVT> & mergednum, IU maxindex)
{
int nlists = indsvec.size();
// this condition is checked in the caller SpMV function.
// I am still putting it here for completeness
if(nlists == 1)
{
// simply copy data
int veclen = listSizes[0];
mergedind.resize(veclen);
mergednum.resize(veclen);
#ifdef THREADED
#pragma omp parallel for
#endif
for(int i=0; i<veclen; i++)
{
mergedind[i] = indsvec[0][i];
mergednum[i] = numsvec[0][i];
}
return;
}
int nthreads=1;
#ifdef THREADED
#pragma omp parallel
{
nthreads = omp_get_num_threads();
}
#endif
int nsplits = 4*nthreads; // oversplit for load balance
nsplits = std::min(nsplits, (int)maxindex);
// splitters[k][i] = position in list k where chunk i begins (found by binary search)
std::vector< std::vector<int32_t> > splitters(nlists);
for(int k=0; k< nlists; k++)
{
splitters[k].resize(nsplits+1);
splitters[k][0] = static_cast<int32_t>(0);
#pragma omp parallel for
for(int i=1; i< nsplits; i++)
{
IU cur_idx = i * (maxindex/nsplits);
auto it = std::lower_bound (indsvec[k], indsvec[k] + listSizes[k], cur_idx);
splitters[k][i] = (int32_t) (it - indsvec[k]);
}
splitters[k][nsplits] = listSizes[k];
}
// ------ perform merge in parallel ------
std::vector<std::vector<IU>> indsBuf(nsplits);
std::vector<std::vector<OVT>> numsBuf(nsplits);
//TODO: allocate these vectors here before calling MergeContributions
#pragma omp parallel for schedule(dynamic)
for(int i=0; i< nsplits; i++)
{
// build per-chunk views (pointer + length) into each source list
std::vector<int32_t *> tIndsVec(nlists);
std::vector<OVT *> tNumsVec(nlists);
std::vector<int> tLengths(nlists);
for(int j=0; j< nlists; ++j)
{
tIndsVec[j] = indsvec[j] + splitters[j][i];
tNumsVec[j] = numsvec[j] + splitters[j][i];
tLengths[j]= splitters[j][i+1] - splitters[j][i];
}
MergeContributions<SR>(tLengths.data(), tIndsVec, tNumsVec, indsBuf[i], numsBuf[i]);
}
// ------ concatenate merged tuples processed by threads ------
std::vector<IU> tdisp(nsplits+1);
tdisp[0] = 0;
for(int i=0; i<nsplits; ++i)
{
tdisp[i+1] = tdisp[i] + indsBuf[i].size();
}
mergedind.resize(tdisp[nsplits]);
mergednum.resize(tdisp[nsplits]);
#pragma omp parallel for schedule(dynamic)
for(int i=0; i< nsplits; i++)
{
std::copy(indsBuf[i].data() , indsBuf[i].data() + indsBuf[i].size(), mergedind.data() + tdisp[i]);
std::copy(numsBuf[i].data() , numsBuf[i].data() + numsBuf[i].size(), mergednum.data() + tdisp[i]);
}
}
/**
* This version is the most flexible sparse matrix X sparse vector [Used in KDT]
* It accepts different types for the matrix (NUM), the input vector (IVT) and the output vector (OVT)
* without relying on automatic type promotion
* Input (x) and output (y) vectors can be ALIASED because y is not written until the algorithm is done with x.
*/
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y,
bool indexisvalue, OptBuf<int32_t, OVT > & optbuf, PreAllocatedSPA<OVT> & SPA)
{
CheckSpMVCompliance(A,x);
optbuf.MarkEmpty();
y.glen = A.getnrow(); // in case it is not set already
MPI_Comm World = x.commGrid->GetWorld();
MPI_Comm ColWorld = x.commGrid->GetColWorld();
MPI_Comm RowWorld = x.commGrid->GetRowWorld();
int accnz;
int32_t trxlocnz;
IU lenuntil;
int32_t *trxinds, *indacc;
IVT *trxnums, *numacc;
#ifdef TIMING
double t0=MPI_Wtime();
#endif
// Step 1: exchange x's local piece with the transpose neighbor
TransposeVector(World, x, trxlocnz, lenuntil, trxinds, trxnums, indexisvalue);
#ifdef TIMING
double t1=MPI_Wtime();
cblas_transvectime += (t1-t0);
#endif
if(x.commGrid->GetGridRows() > 1)
{
// Step 2: gather all pieces along the processor column
AllGatherVector(ColWorld, trxlocnz, lenuntil, trxinds, trxnums, indacc, numacc, accnz, indexisvalue); // trxinds/trxnums deallocated, indacc/numacc allocated, accnz set
}
else
{
// single processor row: the transposed piece already is the full column slice
accnz = trxlocnz;
indacc = trxinds; // aliasing ptr
numacc = trxnums; // aliasing ptr
}
int rowneighs;
MPI_Comm_size(RowWorld, &rowneighs);
int * sendcnt = new int[rowneighs]();
int32_t * sendindbuf;
OVT * sendnumbuf;
int * sdispls;
#ifdef TIMING
double t2=MPI_Wtime();
#endif
// Step 3: local multiply, bucketing output nonzeros per row neighbor
LocalSpMV<SR>(A, rowneighs, optbuf, indacc, numacc, sendindbuf, sendnumbuf, sdispls, sendcnt, accnz, indexisvalue, SPA); // indacc/numacc deallocated, sendindbuf/sendnumbuf/sdispls allocated
#ifdef TIMING
double t3=MPI_Wtime();
cblas_localspmvtime += (t3-t2);
#endif
if(x.commGrid->GetGridCols() == 1)
{
// single processor column: no fold communication needed, copy straight into y
y.ind.resize(sendcnt[0]);
y.num.resize(sendcnt[0]);
if(optbuf.totmax > 0 ) // graph500 optimization enabled
{
#ifdef THREADED
#pragma omp parallel for
#endif
for(int i=0; i<sendcnt[0]; i++)
{
y.ind[i] = optbuf.inds[i];
y.num[i] = optbuf.nums[i];
}
}
else
{
#ifdef THREADED
#pragma omp parallel for
#endif
for(int i=0; i<sendcnt[0]; i++)
{
y.ind[i] = sendindbuf[i];
y.num[i] = sendnumbuf[i];
}
DeleteAll(sendindbuf, sendnumbuf,sdispls);
}
delete [] sendcnt;
return;
}
// Step 4: fold (all-to-all) the partial results along the processor row
int * rdispls = new int[rowneighs];
int * recvcnt = new int[rowneighs];
MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, RowWorld); // share the request counts
// receive displacements are exact whereas send displacements have slack
rdispls[0] = 0;
for(int i=0; i<rowneighs-1; ++i)
{
rdispls[i+1] = rdispls[i] + recvcnt[i];
}
int totrecv = std::accumulate(recvcnt,recvcnt+rowneighs,0);
int32_t * recvindbuf = new int32_t[totrecv];
OVT * recvnumbuf = new OVT[totrecv];
#ifdef TIMING
double t4=MPI_Wtime();
#endif
if(optbuf.totmax > 0 ) // graph500 optimization enabled
{
MPI_Alltoallv(optbuf.inds, sendcnt, optbuf.dspls, MPIType<int32_t>(), recvindbuf, recvcnt, rdispls, MPIType<int32_t>(), RowWorld);
MPI_Alltoallv(optbuf.nums, sendcnt, optbuf.dspls, MPIType<OVT>(), recvnumbuf, recvcnt, rdispls, MPIType<OVT>(), RowWorld);
delete [] sendcnt;
}
else
{
MPI_Alltoallv(sendindbuf, sendcnt, sdispls, MPIType<int32_t>(), recvindbuf, recvcnt, rdispls, MPIType<int32_t>(), RowWorld);
MPI_Alltoallv(sendnumbuf, sendcnt, sdispls, MPIType<OVT>(), recvnumbuf, recvcnt, rdispls, MPIType<OVT>(), RowWorld);
DeleteAll(sendindbuf, sendnumbuf, sendcnt, sdispls);
}
#ifdef TIMING
double t5=MPI_Wtime();
cblas_alltoalltime += (t5-t4);
#endif
#ifdef TIMING
double t6=MPI_Wtime();
#endif
//MergeContributions<SR>(y,recvcnt, rdispls, recvindbuf, recvnumbuf, rowneighs);
// free memory of y, in case it was aliased
std::vector<IU>().swap(y.ind);
std::vector<OVT>().swap(y.num);
// Step 5: merge the per-neighbor sorted contributions into y
std::vector<int32_t *> indsvec(rowneighs);
std::vector<OVT *> numsvec(rowneighs);
#ifdef THREADED
#pragma omp parallel for
#endif
for(int i=0; i<rowneighs; i++)
{
indsvec[i] = recvindbuf+rdispls[i];
numsvec[i] = recvnumbuf+rdispls[i];
}
#ifdef THREADED
MergeContributions_threaded<SR>(recvcnt, indsvec, numsvec, y.ind, y.num, y.MyLocLength());
#else
MergeContributions<SR>(recvcnt, indsvec, numsvec, y.ind, y.num);
#endif
DeleteAll(recvcnt, rdispls,recvindbuf, recvnumbuf);
#ifdef TIMING
double t7=MPI_Wtime();
cblas_mergeconttime += (t7-t6);
#endif
}
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue, PreAllocatedSPA<OVT> & SPA)
{
OptBuf< int32_t, OVT > optbuf = OptBuf< int32_t,OVT >();
SpMV<SR>(A, x, y, indexisvalue, optbuf, SPA);
}
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue)
{
OptBuf< int32_t, OVT > optbuf = OptBuf< int32_t,OVT >();
PreAllocatedSPA<OVT> SPA;
SpMV<SR>(A, x, y, indexisvalue, optbuf, SPA);
}
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue, OptBuf<int32_t, OVT > & optbuf)
{
PreAllocatedSPA<OVT> SPA;
SpMV<SR>(A, x, y, indexisvalue, optbuf, SPA);
}
/**
* Automatic type promotion is ONLY done here, all the callee functions (in Friends.h and below) are initialized with the promoted type
* If indexisvalue = true, then we do not need to transfer values for x (happens for BFS iterations with boolean matrices and integer rhs vectors)
**/
template <typename SR, typename IU, typename NUM, typename UDER>
FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV
(const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue, OptBuf<int32_t, typename promote_trait<NUM,IU>::T_promote > & optbuf)
{
	typedef typename promote_trait<NUM,IU>::T_promote T_promote;
	// Result lives on x's grid and has length equal to A's row count
	// (identity doesn't matter for sparse vectors).
	FullyDistSpVec<IU, T_promote> result ( x.getcommgrid(), A.getnrow());
	SpMV<SR>(A, x, result, indexisvalue, optbuf);
	return result;
}
/**
* Parallel dense SpMV
**/
template <typename SR, typename IU, typename NUM, typename NUV, typename UDER>
FullyDistVec<IU,typename promote_trait<NUM,NUV>::T_promote> SpMV
(const SpParMat<IU,NUM,UDER> & A, const FullyDistVec<IU,NUV> & x )
{
typedef typename promote_trait<NUM,NUV>::T_promote T_promote;
CheckSpMVCompliance(A, x);
MPI_Comm World = x.commGrid->GetWorld();
MPI_Comm ColWorld = x.commGrid->GetColWorld();
MPI_Comm RowWorld = x.commGrid->GetRowWorld();
// transpose step: swap local pieces with the diagonal (complement) neighbor
int xsize = (int) x.LocArrSize();
int trxsize = 0;
int diagneigh = x.commGrid->GetComplementRank();
MPI_Status status;
MPI_Sendrecv(&xsize, 1, MPI_INT, diagneigh, TRX, &trxsize, 1, MPI_INT, diagneigh, TRX, World, &status);
NUV * trxnums = new NUV[trxsize];
MPI_Sendrecv(const_cast<NUV*>(SpHelper::p2a(x.arr)), xsize, MPIType<NUV>(), diagneigh, TRX, trxnums, trxsize, MPIType<NUV>(), diagneigh, TRX, World, &status);
// gather the full dense column slice along the processor column
int colneighs, colrank;
MPI_Comm_size(ColWorld, &colneighs);
MPI_Comm_rank(ColWorld, &colrank);
int * colsize = new int[colneighs];
colsize[colrank] = trxsize;
MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colsize, 1, MPI_INT, ColWorld);
int * dpls = new int[colneighs](); // displacements (zero initialized pid)
std::partial_sum(colsize, colsize+colneighs-1, dpls+1);
int accsize = std::accumulate(colsize, colsize+colneighs, 0);
NUV * numacc = new NUV[accsize];
MPI_Allgatherv(trxnums, trxsize, MPIType<NUV>(), numacc, colsize, dpls, MPIType<NUV>(), ColWorld);
delete [] trxnums;
// serial SpMV with dense vector
T_promote id = SR::id();
IU ysize = A.getlocalrows();
T_promote * localy = new T_promote[ysize];
std::fill_n(localy, ysize, id);
#ifdef THREADED
dcsc_gespmv_threaded<SR>(*(A.spSeq), numacc, localy);
#else
dcsc_gespmv<SR>(*(A.spSeq), numacc, localy);
#endif
DeleteAll(numacc,colsize, dpls);
// FullyDistVec<IT,NT>(shared_ptr<CommGrid> grid, IT globallen, NT initval, NT id)
FullyDistVec<IU, T_promote> y ( x.commGrid, A.getnrow(), id);
int rowneighs;
MPI_Comm_size(RowWorld, &rowneighs);
// fold: reduce each row segment of localy onto the owning row neighbor
IU begptr, endptr;
for(int i=0; i< rowneighs; ++i)
{
begptr = y.RowLenUntil(i);
if(i == rowneighs-1)
{
endptr = ysize;
}
else
{
endptr = y.RowLenUntil(i+1);
}
MPI_Reduce(localy+begptr, SpHelper::p2a(y.arr), endptr-begptr, MPIType<T_promote>(), SR::mpi_op(), i, RowWorld);
}
delete [] localy;
return y;
}
/**
* \TODO: Old version that is no longer considered optimal
* Kept for legacy purposes
* To be removed when the other functions are fully tested.
**/
// Legacy sparse SpMV (see the comment block above): transpose, allgather,
// serial multiply, all-to-all fold, then a SPA-based merge into y.
// Fix: std::bind2nd was deprecated in C++11 and removed in C++17; the index
// rebase now uses an equivalent lambda.
template <typename SR, typename IU, typename NUM, typename NUV, typename UDER>
FullyDistSpVec<IU,typename promote_trait<NUM,NUV>::T_promote> SpMV
(const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,NUV> & x)
{
typedef typename promote_trait<NUM,NUV>::T_promote T_promote;
CheckSpMVCompliance(A, x);
MPI_Comm World = x.commGrid->GetWorld();
MPI_Comm ColWorld = x.commGrid->GetColWorld();
MPI_Comm RowWorld = x.commGrid->GetRowWorld();
// transpose step: exchange counts/offsets/data with the diagonal neighbor
int xlocnz = (int) x.getlocnnz();
int trxlocnz = 0;
int roffst = x.RowLenUntil();
int offset;
int diagneigh = x.commGrid->GetComplementRank();
MPI_Status status;
MPI_Sendrecv(&xlocnz, 1, MPI_INT, diagneigh, TRX, &trxlocnz, 1, MPI_INT, diagneigh, TRX, World, &status);
MPI_Sendrecv(&roffst, 1, MPI_INT, diagneigh, TROST, &offset, 1, MPI_INT, diagneigh, TROST, World, &status);
IU * trxinds = new IU[trxlocnz];
NUV * trxnums = new NUV[trxlocnz];
MPI_Sendrecv(const_cast<IU*>(SpHelper::p2a(x.ind)), xlocnz, MPIType<IU>(), diagneigh, TRX, trxinds, trxlocnz, MPIType<IU>(), diagneigh, TRX, World, &status);
MPI_Sendrecv(const_cast<NUV*>(SpHelper::p2a(x.num)), xlocnz, MPIType<NUV>(), diagneigh, TRX, trxnums, trxlocnz, MPIType<NUV>(), diagneigh, TRX, World, &status);
// fullydist indexing (n pieces) -> matrix indexing (sqrt(p) pieces)
std::transform(trxinds, trxinds+trxlocnz, trxinds, [offset](IU ind) { return ind + offset; });
// gather all pieces along the processor column
int colneighs, colrank;
MPI_Comm_size(ColWorld, &colneighs);
MPI_Comm_rank(ColWorld, &colrank);
int * colnz = new int[colneighs];
colnz[colrank] = trxlocnz;
MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colnz, 1, MPI_INT, ColWorld);
int * dpls = new int[colneighs](); // displacements (zero initialized pid)
std::partial_sum(colnz, colnz+colneighs-1, dpls+1);
int accnz = std::accumulate(colnz, colnz+colneighs, 0);
IU * indacc = new IU[accnz];
NUV * numacc = new NUV[accnz];
// ABAB: Future issues here, colnz is of type int (MPI limitation)
// What if the aggregate vector size along the processor row/column is not 32-bit addressable?
MPI_Allgatherv(trxinds, trxlocnz, MPIType<IU>(), indacc, colnz, dpls, MPIType<IU>(), ColWorld);
MPI_Allgatherv(trxnums, trxlocnz, MPIType<NUV>(), numacc, colnz, dpls, MPIType<NUV>(), ColWorld);
DeleteAll(trxinds, trxnums);
// serial SpMV with sparse vector
std::vector< int32_t > indy;
std::vector< T_promote > numy;
int32_t * tmpindacc = new int32_t[accnz];
for(int i=0; i< accnz; ++i) tmpindacc[i] = indacc[i];
delete [] indacc;
dcsc_gespmv<SR>(*(A.spSeq), tmpindacc, numacc, accnz, indy, numy); // actual multiplication
DeleteAll(tmpindacc, numacc);
DeleteAll(colnz, dpls);
FullyDistSpVec<IU, T_promote> y ( x.commGrid, A.getnrow()); // identity doesn't matter for sparse vectors
IU yintlen = y.MyRowLength();
// bucket output nonzeros by their owning row neighbor
int rowneighs;
MPI_Comm_size(RowWorld,&rowneighs);
std::vector< std::vector<IU> > sendind(rowneighs);
std::vector< std::vector<T_promote> > sendnum(rowneighs);
typename std::vector<int32_t>::size_type outnz = indy.size();
for(typename std::vector<IU>::size_type i=0; i< outnz; ++i)
{
IU locind;
int rown = y.OwnerWithinRow(yintlen, static_cast<IU>(indy[i]), locind);
sendind[rown].push_back(locind);
sendnum[rown].push_back(numy[i]);
}
// flatten the buckets into contiguous send buffers and exchange counts
IU * sendindbuf = new IU[outnz];
T_promote * sendnumbuf = new T_promote[outnz];
int * sendcnt = new int[rowneighs];
int * sdispls = new int[rowneighs];
for(int i=0; i<rowneighs; ++i)
sendcnt[i] = sendind[i].size();
int * rdispls = new int[rowneighs];
int * recvcnt = new int[rowneighs];
MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, RowWorld); // share the request counts
sdispls[0] = 0;
rdispls[0] = 0;
for(int i=0; i<rowneighs-1; ++i)
{
sdispls[i+1] = sdispls[i] + sendcnt[i];
rdispls[i+1] = rdispls[i] + recvcnt[i];
}
int totrecv = std::accumulate(recvcnt,recvcnt+rowneighs,0);
IU * recvindbuf = new IU[totrecv];
T_promote * recvnumbuf = new T_promote[totrecv];
for(int i=0; i<rowneighs; ++i)
{
std::copy(sendind[i].begin(), sendind[i].end(), sendindbuf+sdispls[i]);
std::vector<IU>().swap(sendind[i]);	// release the bucket's memory eagerly
}
for(int i=0; i<rowneighs; ++i)
{
std::copy(sendnum[i].begin(), sendnum[i].end(), sendnumbuf+sdispls[i]);
std::vector<T_promote>().swap(sendnum[i]);
}
MPI_Alltoallv(sendindbuf, sendcnt, sdispls, MPIType<IU>(), recvindbuf, recvcnt, rdispls, MPIType<IU>(), RowWorld);
MPI_Alltoallv(sendnumbuf, sendcnt, sdispls, MPIType<T_promote>(), recvnumbuf, recvcnt, rdispls, MPIType<T_promote>(), RowWorld);
DeleteAll(sendindbuf, sendnumbuf);
DeleteAll(sendcnt, recvcnt, sdispls, rdispls);
// define a SPA-like data structure: accumulate duplicates, track nonzeros
IU ysize = y.MyLocLength();
T_promote * localy = new T_promote[ysize];
bool * isthere = new bool[ysize];
std::vector<IU> nzinds; // nonzero indices
std::fill_n(isthere, ysize, false);
for(int i=0; i< totrecv; ++i)
{
if(!isthere[recvindbuf[i]])
{
localy[recvindbuf[i]] = recvnumbuf[i]; // initial assignment
nzinds.push_back(recvindbuf[i]);
isthere[recvindbuf[i]] = true;
}
else
{
localy[recvindbuf[i]] = SR::add(localy[recvindbuf[i]], recvnumbuf[i]);
}
}
DeleteAll(isthere, recvindbuf, recvnumbuf);
// emit y's nonzeros in sorted index order
sort(nzinds.begin(), nzinds.end());
int nnzy = nzinds.size();
y.ind.resize(nnzy);
y.num.resize(nnzy);
for(int i=0; i< nnzy; ++i)
{
y.ind[i] = nzinds[i];
y.num[i] = localy[nzinds[i]];
}
delete [] localy;
return y;
}
// Elementwise multiplication of two sparse matrices distributed over the
// same process grid; aborts the MPI job on a grid mismatch.
template <typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU,typename promote_trait<NU1,NU2>::T_promote,typename promote_trait<UDERA,UDERB>::T_promote> EWiseMult
(const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B , bool exclude)
{
	typedef typename promote_trait<NU1,NU2>::T_promote N_promote;
	typedef typename promote_trait<UDERA,UDERB>::T_promote DER_promote;
	// Guard clause: both operands must live on identical process grids.
	if(!( *(A.commGrid) == *(B.commGrid) ))
	{
		std::cout << "Grids are not comparable elementwise multiplication" << std::endl;
		MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
		return SpParMat< IU,N_promote,DER_promote >();
	}
	DER_promote * localProduct = new DER_promote( EWiseMult(*(A.spSeq),*(B.spSeq),exclude) );
	return SpParMat<IU, N_promote, DER_promote> (localProduct, A.commGrid);
}
// Elementwise apply (binary op, optional not-B semantics) on two sparse
// matrices sharing a process grid; aborts the MPI job on a grid mismatch.
template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation>
SpParMat<IU,RETT,RETDER> EWiseApply
(const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, bool notB, const NU2& defaultBVal)
{
	// Guard clause: mismatched grids are fatal.
	if(!( *(A.commGrid) == *(B.commGrid) ))
	{
		std::cout << "Grids are not comparable elementwise apply" << std::endl;
		MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
		return SpParMat< IU,RETT,RETDER >();
	}
	RETDER * localApply = new RETDER( EWiseApply<RETT>(*(A.spSeq),*(B.spSeq), __binary_op, notB, defaultBVal) );
	return SpParMat<IU, RETT, RETDER> (localApply, A.commGrid);
}
// Extended elementwise apply: binary op plus a do-op predicate, with explicit
// null handling for either operand; aborts the MPI job on a grid mismatch.
template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation, typename _BinaryPredicate>
SpParMat<IU,RETT,RETDER> EWiseApply
(const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, _BinaryPredicate do_op, bool allowANulls, bool allowBNulls, const NU1& ANullVal, const NU2& BNullVal, const bool allowIntersect, const bool useExtendedBinOp)
{
	// Guard clause: mismatched grids are fatal.
	if(!( *(A.commGrid) == *(B.commGrid) ))
	{
		std::cout << "Grids are not comparable elementwise apply" << std::endl;
		MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
		return SpParMat< IU,RETT,RETDER >();
	}
	RETDER * localApply = new RETDER( EWiseApply<RETT>(*(A.spSeq),*(B.spSeq), __binary_op, do_op, allowANulls, allowBNulls, ANullVal, BNullVal, allowIntersect) );
	return SpParMat<IU, RETT, RETDER> (localApply, A.commGrid);
}
// plain adapter: wraps plain (two-argument) functors in extended-signature
// adapters and dispatches to the extended EWiseApply (useExtendedBinOp = true).
template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation, typename _BinaryPredicate>
SpParMat<IU,RETT,RETDER>
EWiseApply (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, _BinaryPredicate do_op, bool allowANulls, bool allowBNulls, const NU1& ANullVal, const NU2& BNullVal, const bool allowIntersect = true)
{
	EWiseExtToPlainAdapter<RETT, NU1, NU2, _BinaryOperation> opAdapter(__binary_op);
	EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate> predAdapter(do_op);
	return EWiseApply<RETT, RETDER>(A, B, opAdapter, predAdapter, allowANulls, allowBNulls, ANullVal, BNullVal, allowIntersect, true);
}
// end adapter
/**
* if exclude is true, then we prune all entries W[i] != zero from V
* if exclude is false, then we perform a proper elementwise multiplication
**/
template <typename IU, typename NU1, typename NU2>
FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote> EWiseMult
(const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , bool exclude, NU2 zero)
{
typedef typename promote_trait<NU1,NU2>::T_promote T_promote;
if(*(V.commGrid) == *(W.commGrid))
{
FullyDistSpVec< IU, T_promote> Product(V.commGrid);
if(V.glen != W.glen)
{
std::cerr << "Vector dimensions don't match for EWiseMult\n";
MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
}
else
{
Product.glen = V.glen;
IU size= V.getlocnnz();
if(exclude)
{
// exclude == true: keep V[i] only where W[V.ind[i]] equals `zero`
#if defined(_OPENMP) && defined(CBLAS_EXPERIMENTAL) // not faster than serial
int actual_splits = cblas_splits * 1; // 1 is the parallel slackness
std::vector <IU> tlosizes (actual_splits, 0);
std::vector < std::vector<IU> > tlinds(actual_splits);
std::vector < std::vector<T_promote> > tlnums(actual_splits);
IU tlsize = size / actual_splits;
#pragma omp parallel for //schedule(dynamic, 1)
for(IU t = 0; t < actual_splits; ++t)
{
IU tlbegin = t*tlsize;
IU tlend = (t==actual_splits-1)? size : (t+1)*tlsize;
for(IU i=tlbegin; i<tlend; ++i)
{
if(W.arr[V.ind[i]] == zero) // keep only those
{
tlinds[t].push_back(V.ind[i]);
tlnums[t].push_back(V.num[i]);
tlosizes[t]++;
}
}
}
// stitch the per-thread results together in index order
std::vector<IU> prefix_sum(actual_splits+1,0);
std::partial_sum(tlosizes.begin(), tlosizes.end(), prefix_sum.begin()+1);
Product.ind.resize(prefix_sum[actual_splits]);
Product.num.resize(prefix_sum[actual_splits]);
#pragma omp parallel for //schedule(dynamic, 1)
for(IU t=0; t< actual_splits; ++t)
{
std::copy(tlinds[t].begin(), tlinds[t].end(), Product.ind.begin()+prefix_sum[t]);
std::copy(tlnums[t].begin(), tlnums[t].end(), Product.num.begin()+prefix_sum[t]);
}
#else
for(IU i=0; i<size; ++i)
{
if(W.arr[V.ind[i]] == zero) // keep only those
{
Product.ind.push_back(V.ind[i]);
Product.num.push_back(V.num[i]);
}
}
#endif
}
else
{
// exclude == false: proper elementwise multiply; drop entries where W is `zero`
for(IU i=0; i<size; ++i)
{
if(W.arr[V.ind[i]] != zero) // keep only those
{
Product.ind.push_back(V.ind[i]);
Product.num.push_back(V.num[i] * W.arr[V.ind[i]]);
}
}
}
}
return Product;
}
else
{
std::cout << "Grids are not comparable elementwise multiplication" << std::endl;
MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
return FullyDistSpVec< IU,T_promote>();
}
}
/**
Threaded EWiseApply. Only called internally from EWiseApply.
**/
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply_threaded
(const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp)
{
typedef RET T_promote; //typedef typename promote_trait<NU1,NU2>::T_promote T_promote;
if(*(V.commGrid) == *(W.commGrid))
{
FullyDistSpVec< IU, T_promote> Product(V.commGrid);
if(V.TotalLength() != W.TotalLength())
{
std::ostringstream outs;
outs << "Vector dimensions don't match (" << V.TotalLength() << " vs " << W.TotalLength() << ") for EWiseApply (short version)\n";
SpParHelper::Print(outs.str());
MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
}
else
{
int nthreads=1;
#ifdef _OPENMP
#pragma omp parallel
{
nthreads = omp_get_num_threads();
}
#endif
Product.glen = V.glen;
IU size= W.LocArrSize();
IU spsize = V.getlocnnz();
// temporary result vectors per thread
std::vector<std::vector<IU>> tProductInd(nthreads);
std::vector<std::vector<T_promote>> tProductVal(nthreads);
IU perthread; //chunk of tProductInd or tProductVal allocated to each thread
if (allowVNulls)
perthread = size/nthreads;
else
perthread = spsize/nthreads;
#ifdef _OPENMP
#pragma omp parallel
#endif
{
int curthread = 0;
#ifdef _OPENMP
curthread = omp_get_thread_num();
#endif
// [tStartIdx, tNextIdx) is this thread's slice of the iteration space
IU tStartIdx = perthread * curthread;
IU tNextIdx = perthread * (curthread+1);
if (allowVNulls)
{
// union semantics: iterate the dense vector, substituting Vzero
// wherever V has no entry
if(curthread == nthreads-1) tNextIdx = size;
// get sparse part for the current thread
auto it = std::lower_bound (V.ind.begin(), V.ind.end(), tStartIdx);
IU tSpIdx = (IU) std::distance(V.ind.begin(), it);
// iterate over the dense vector
for(IU tIdx=tStartIdx; tIdx < tNextIdx; ++tIdx)
{
if(tSpIdx < spsize && V.ind[tSpIdx] < tNextIdx && V.ind[tSpIdx] == tIdx)
{
if (_doOp(V.num[tSpIdx], W.arr[tIdx], false, false))
{
tProductInd[curthread].push_back(tIdx);
tProductVal[curthread].push_back (_binary_op(V.num[tSpIdx], W.arr[tIdx], false, false));
}
tSpIdx++;
}
else
{
if (_doOp(Vzero, W.arr[tIdx], true, false))
{
tProductInd[curthread].push_back(tIdx);
tProductVal[curthread].push_back (_binary_op(Vzero, W.arr[tIdx], true, false));
}
}
}
}
else // iterate over the sparse vector
{
// intersection semantics: only positions where V has an entry
if(curthread == nthreads-1) tNextIdx = spsize;
for(IU tSpIdx=tStartIdx; tSpIdx < tNextIdx; ++tSpIdx)
{
if (_doOp(V.num[tSpIdx], W.arr[V.ind[tSpIdx]], false, false))
{
tProductInd[curthread].push_back( V.ind[tSpIdx]);
tProductVal[curthread].push_back (_binary_op(V.num[tSpIdx], W.arr[V.ind[tSpIdx]], false, false));
}
}
}
}
// prefix-sum per-thread result sizes to get copy destinations
std::vector<IU> tdisp(nthreads+1);
tdisp[0] = 0;
for(int i=0; i<nthreads; ++i)
{
tdisp[i+1] = tdisp[i] + tProductInd[i].size();
}
// copy results from temporary vectors
Product.ind.resize(tdisp[nthreads]);
Product.num.resize(tdisp[nthreads]);
#ifdef _OPENMP
#pragma omp parallel
#endif
{
int curthread = 0;
#ifdef _OPENMP
curthread = omp_get_thread_num();
#endif
std::copy(tProductInd[curthread].begin(), tProductInd[curthread].end(), Product.ind.data() + tdisp[curthread]);
std::copy(tProductVal[curthread].begin() , tProductVal[curthread].end(), Product.num.data() + tdisp[curthread]);
}
}
return Product;
}
else
{
std::cout << "Grids are not comparable for EWiseApply" << std::endl;
MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
return FullyDistSpVec< IU,T_promote>();
}
}
/**
* Performs an arbitrary binary operation _binary_op on the corresponding elements of two vectors with the result stored in a return vector ret.
 * The binary operation is only performed if the binary predicate _doOp returns true for those elements. Otherwise the binary operation is not
* performed and ret does not contain an element at that position.
* More formally the operation is defined as:
* if (_doOp(V[i], W[i]))
* ret[i] = _binary_op(V[i], W[i])
* else
* // ret[i] is not set
* Hence _doOp can be used to implement a filter on either of the vectors.
*
* The above is only defined if both V[i] and W[i] exist (i.e. an intersection). To allow a union operation (ex. when V[i] doesn't exist but W[i] does)
* the allowVNulls flag is set to true and the Vzero argument is used as the missing V[i] value.
*
 * The type of each element of ret need not be related to the types of V or W, so the return type must be explicitly specified as a template parameter:
* FullyDistSpVec<int, double> r = EWiseApply<double>(V, W, plus, retTrue, false, 0)
**/
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply
	(const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp)
{
#ifdef _OPENMP
	// With OpenMP enabled, delegate to the multithreaded implementation.
	return EWiseApply_threaded<RET>(V, W, _binary_op, _doOp, allowVNulls, Vzero, useExtendedBinOp);
#else
	typedef RET T_promote; //typedef typename promote_trait<NU1,NU2>::T_promote T_promote;
	if(*(V.commGrid) == *(W.commGrid))
	{
		FullyDistSpVec< IU, T_promote> Product(V.commGrid);
		//FullyDistVec< IU, NU1> DV (V); // Ariful: I am not sure why it was there??
		if(V.TotalLength() != W.TotalLength())
		{
			std::ostringstream outs;
			outs << "Vector dimensions don't match (" << V.TotalLength() << " vs " << W.TotalLength() << ") for EWiseApply (short version)\n";
			SpParHelper::Print(outs.str());
			MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
		}
		else
		{
			Product.glen = V.glen;
			IU size= W.LocArrSize();
			IU spsize = V.getlocnnz();
			IU sp_iter = 0;	// cursor into V's local sparse entries (V.ind is sorted)
			if (allowVNulls)
			{
				// iterate over the dense vector; positions missing from V
				// are substituted with Vzero (union-style semantics)
				for(IU i=0; i<size; ++i)
				{
					if(sp_iter < spsize && V.ind[sp_iter] == i)
					{
						// V has an entry at position i: operate on (V[i], W[i])
						if (_doOp(V.num[sp_iter], W.arr[i], false, false))
						{
							Product.ind.push_back(i);
							Product.num.push_back(_binary_op(V.num[sp_iter], W.arr[i], false, false));
						}
						sp_iter++;
					}
					else
					{
						// V is null at position i: operate on (Vzero, W[i])
						if (_doOp(Vzero, W.arr[i], true, false))
						{
							Product.ind.push_back(i);
							Product.num.push_back(_binary_op(Vzero, W.arr[i], true, false));
						}
					}
				}
			}
			else
			{
				// iterate over the sparse vector only (intersection-style:
				// positions without a V entry produce no output)
				for(sp_iter = 0; sp_iter < spsize; ++sp_iter)
				{
					if (_doOp(V.num[sp_iter], W.arr[V.ind[sp_iter]], false, false))
					{
						Product.ind.push_back(V.ind[sp_iter]);
						Product.num.push_back(_binary_op(V.num[sp_iter], W.arr[V.ind[sp_iter]], false, false));
					}
				}
			}
		}
		return Product;
	}
	else
	{
		std::cout << "Grids are not comparable for EWiseApply" << std::endl;
		MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
		return FullyDistSpVec< IU,T_promote>();
	}
#endif
}
/**
* Performs an arbitrary binary operation _binary_op on the corresponding elements of two vectors with the result stored in a return vector ret.
 * The binary operation is only performed if the binary predicate _doOp returns true for those elements. Otherwise the binary operation is not
* performed and ret does not contain an element at that position.
* More formally the operation is defined as:
* if (_doOp(V[i], W[i]))
* ret[i] = _binary_op(V[i], W[i])
* else
* // ret[i] is not set
* Hence _doOp can be used to implement a filter on either of the vectors.
*
* The above is only defined if both V[i] and W[i] exist (i.e. an intersection). To allow a union operation (ex. when V[i] doesn't exist but W[i] does)
* the allowVNulls flag is set to true and the Vzero argument is used as the missing V[i] value.
* !allowVNulls && !allowWNulls => intersection
* !allowVNulls && allowWNulls => operate on all elements of V
* allowVNulls && !allowWNulls => operate on all elements of W
* allowVNulls && allowWNulls => union
*
 * The type of each element of ret need not be related to the types of V or W, so the return type must be explicitly specified as a template parameter:
* FullyDistSpVec<int, double> r = EWiseApply<double>(V, W, plus, ...)
* For intersection, Vzero and Wzero are irrelevant
* ABAB: \todo: Should allowIntersect be "false" for all SetDifference uses?
**/
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply
	(const FullyDistSpVec<IU,NU1> & V, const FullyDistSpVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, bool allowWNulls, NU1 Vzero, NU2 Wzero, const bool allowIntersect, const bool useExtendedBinOp)
{
	typedef RET T_promote; // typename promote_trait<NU1,NU2>::T_promote T_promote;
	if(*(V.commGrid) == *(W.commGrid))
	{
		FullyDistSpVec< IU, T_promote> Product(V.commGrid);
		if(V.glen != W.glen)
		{
			std::ostringstream outs;
			outs << "Vector dimensions don't match (" << V.glen << " vs " << W.glen << ") for EWiseApply (full version)\n";
			SpParHelper::Print(outs.str());
			MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
		}
		else
		{
			Product.glen = V.glen;
			// Classic sorted-merge walk over the two (sorted) index arrays.
			typename std::vector< IU >::const_iterator indV = V.ind.begin();
			typename std::vector< NU1 >::const_iterator numV = V.num.begin();
			typename std::vector< IU >::const_iterator indW = W.ind.begin();
			typename std::vector< NU2 >::const_iterator numW = W.num.begin();
			while (indV < V.ind.end() && indW < W.ind.end())
			{
				if (*indV == *indW)
				{
					// overlap: both vectors have an entry at this index
					if (allowIntersect)
					{
						if (_doOp(*numV, *numW, false, false))
						{
							Product.ind.push_back(*indV);
							Product.num.push_back(_binary_op(*numV, *numW, false, false));
						}
					}
					indV++; numV++;
					indW++; numW++;
				}
				else if (*indV < *indW)
				{
					// V has value but W does not; Wzero stands in for W's entry
					if (allowWNulls)
					{
						if (_doOp(*numV, Wzero, false, true))
						{
							Product.ind.push_back(*indV);
							Product.num.push_back(_binary_op(*numV, Wzero, false, true));
						}
					}
					indV++; numV++;
				}
				else //(*indV > *indW)
				{
					// W has value but V does not; Vzero stands in for V's entry
					if (allowVNulls)
					{
						if (_doOp(Vzero, *numW, true, false))
						{
							Product.ind.push_back(*indW);
							Product.num.push_back(_binary_op(Vzero, *numW, true, false));
						}
					}
					indW++; numW++;
				}
			}
			// clean up: drain leftover entries of whichever vector is longer.
			// Leftover V entries matter only when W nulls are allowed (and
			// vice versa); otherwise they fall outside the requested set.
			while (allowWNulls && indV < V.ind.end())
			{
				if (_doOp(*numV, Wzero, false, true))
				{
					Product.ind.push_back(*indV);
					Product.num.push_back(_binary_op(*numV, Wzero, false, true));
				}
				indV++; numV++;
			}
			while (allowVNulls && indW < W.ind.end())
			{
				if (_doOp(Vzero, *numW, true, false))
				{
					Product.ind.push_back(*indW);
					Product.num.push_back(_binary_op(Vzero, *numW, true, false));
				}
				indW++; numW++;
			}
		}
		return Product;
	}
	else
	{
		std::cout << "Grids are not comparable for EWiseApply" << std::endl;
		MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
		return FullyDistSpVec< IU,T_promote>();
	}
}
// plain callback versions
// Plain-callback variant: wraps two-argument callbacks in extended-binop
// adapters and forwards to the extended (sparse x dense) EWiseApply.
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply
	(const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero)
{
	EWiseExtToPlainAdapter<RET, NU1, NU2, _BinaryOperation> op_adapter(_binary_op);
	EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate> pred_adapter(_doOp);
	return EWiseApply<RET>(V, W, op_adapter, pred_adapter, allowVNulls, Vzero, true);
}
// Plain-callback variant: wraps two-argument callbacks in extended-binop
// adapters and forwards to the extended (sparse x sparse) EWiseApply.
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply
	(const FullyDistSpVec<IU,NU1> & V, const FullyDistSpVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, bool allowWNulls, NU1 Vzero, NU2 Wzero, const bool allowIntersect = true)
{
	EWiseExtToPlainAdapter<RET, NU1, NU2, _BinaryOperation> op_adapter(_binary_op);
	EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate> pred_adapter(_doOp);
	return EWiseApply<RET>(V, W, op_adapter, pred_adapter,
						   allowVNulls, allowWNulls, Vzero, Wzero, allowIntersect, true);
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// sampling-based nnz estimation via SpMV
// @OGUZ-NOTE This is not based on SUMMA, do not use. Estimates the number of
// nonzeros in the final output matrix.
// Number of independent sampling rounds used by the nnz estimator below.
#define NROUNDS 5
// One sample value per round, carried through the SpMVs as a unit.
typedef std::array<float, NROUNDS> samparr_t;
// promote_trait specialization: combining any nonzero type with a sample
// array yields a sample array (matrix values are pass-through in the
// select-min semiring below).
template <typename NZT>
struct promote_trait<NZT, samparr_t>
{
typedef samparr_t T_promote;
};
// Output handler for FullyDistVec::ParallelWrite: serializes one sample
// array as space-separated values on the given stream.
class SamplesSaveHandler
{
public:
	// Write each of the NROUNDS samples stored at `index` (index itself is
	// not printed; ParallelWrite supplies it for handlers that need it).
	template<typename c, typename t, typename V>
	void save(std::basic_ostream<c, t> &os,
			  std::array<V, NROUNDS> &sample_vec,
			  int64_t index)
	{
		for (const V &sample : sample_vec)
			os << sample << " ";
	}
};
// "Select-min" semiring used to propagate min-sketch sample arrays through
// SpMV: multiply passes the vector's sample array through unchanged (the
// matrix value is ignored), add takes the element-wise minimum.
template<typename NZT>
struct SelectMinxSR
{
	// Identity for element-wise min: an array of +inf (float max).
	static samparr_t id()
	{
		samparr_t ident;
		ident.fill(std::numeric_limits<float>::max());
		return ident;
	}

	static bool returnedSAID()
	{
		return false;
	}

	// Element-wise minimum of two sample arrays.
	static samparr_t
	add (const samparr_t &lhs, const samparr_t &rhs)
	{
		samparr_t result;
		for (int round = 0; round < NROUNDS; ++round)
			result[round] = std::min(lhs[round], rhs[round]);
		return result;
	}

	// The nonzero value is irrelevant; the sample array flows through.
	static samparr_t
	multiply (const NZT arg1, const samparr_t &arg2)
	{
		return arg2;
	}

	static void axpy (const NZT a, const samparr_t &x, samparr_t &y)
	{
		y = add(y, multiply(a, x));
	}

	// Lazily-created commutative MPI reduction wrapping MPI_func.
	static MPI_Op mpi_op()
	{
		static MPI_Op mpiop;
		static bool exists = false;
		if (!exists)
		{
			MPI_Op_create(MPI_func, true, &mpiop);
			exists = true;
		}
		return mpiop;
	}

	// User-defined MPI reduction: element-wise min over sample arrays.
	static void
	MPI_func(void *invec, void *inoutvec, int *len, MPI_Datatype *datatype)
	{
		samparr_t *in = static_cast<samparr_t *>(invec);
		samparr_t *inout = static_cast<samparr_t *>(inoutvec);
		for (int i = 0; i < *len; ++i)
			inout[i] = add(inout[i], in[i]);
	}
};
// Sampling-based estimator for nnz(A*B): NROUNDS exponential samples are
// attached to each column, propagated through two select-min SpMVs (on the
// transposed matrices), and the per-row minima are turned into a cardinality
// estimate via (NROUNDS-1)/sum(min samples). Not SUMMA-based; estimate only.
// Both A and B are transposed internally and restored before returning.
// Returns the global (all-reduced) estimated nonzero count of A*B.
template <typename IU, typename NU1, typename NU2,
	typename UDERA, typename UDERB>
int64_t
EstPerProcessNnzSpMV(
    SpParMat<IU, NU1, UDERA> &A, SpParMat<IU, NU2, UDERB> &B
    )
{
	int myrank;
	MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
	float lambda = 1.0f;	// rate of the exponential sampling distribution

	if (myrank == 0)
		std::cout << "taking transposes." << std::endl;
	A.Transpose();
	B.Transpose();

	if (myrank == 0)
		std::cout << "setting initial samples." << std::endl;
	samparr_t sa;
	FullyDistVec<IU, samparr_t> samples_init(A.getcommgrid(), A.getncol(), sa);

	#ifdef THREADED
	#pragma omp parallel
	#endif
	{
		int tid = 0;
		#ifdef THREADED
		tid = omp_get_thread_num();
		#endif
		// FIX: seed per (rank, thread). The previous code default-constructed
		// the engine, so every rank and thread drew the identical sample
		// stream, correlating the sketches and biasing the estimate.
		// Deterministic seeding keeps runs reproducible.
		std::default_random_engine gen(
			static_cast<unsigned>(myrank) * 8191u + static_cast<unsigned>(tid) + 1u);
		std::exponential_distribution<float> exp_dist(lambda);
		// FIX: use a work-sharing "omp for" here. The old nested
		// "#pragma omp parallel for" inside this parallel region made every
		// thread execute the entire loop, racing on SetLocalElement and
		// duplicating all the work.
		#ifdef THREADED
		#pragma omp for
		#endif
		for (IU i = 0; i < samples_init.LocArrSize(); ++i)
		{
			samparr_t tmp;
			for (auto it = tmp.begin(); it != tmp.end(); ++it)
				*it = exp_dist(gen);
			samples_init.SetLocalElement(i, tmp);
		}
	}

	if (myrank == 0)
		std::cout << "computing mid samples." << std::endl;
	// Propagate sketches through A^T: each output row keeps the element-wise
	// minimum of the sample arrays of its contributing columns.
	FullyDistVec<IU, samparr_t> samples_mid =
		SpMV<SelectMinxSR<NU1> > (A, samples_init);

	if (myrank == 0)
		std::cout << "computing final samples." << std::endl;
	FullyDistVec<IU, samparr_t> samples_final =
		SpMV<SelectMinxSR<NU2> > (B, samples_mid);

	if (myrank == 0)
		std::cout << "computing nnz estimation." << std::endl;
	float nnzest = 0.0f;
	std::cout << myrank << "samples_final loc size: "
			  << samples_final.LocArrSize() << std::endl;
	const samparr_t *lsamples = samples_final.GetLocArr();
	// Standard minimum-based cardinality estimator per row.
	#ifdef THREADED
	#pragma omp parallel for reduction (+:nnzest)
	#endif
	for (IU i = 0; i < samples_final.LocArrSize(); ++i)
	{
		float tmp = 0.0f;
		for (auto it = lsamples[i].begin(); it != lsamples[i].end(); ++it)
			tmp += *it;
		nnzest += static_cast<float>(NROUNDS - 1) / tmp;
	}

	if (myrank == 0)
		std::cout << "taking transposes again." << std::endl;
	int64_t nnzC_est = nnzest;
	int64_t nnzC_tot = 0;
	MPI_Allreduce(&nnzC_est, &nnzC_tot, 1, MPIType<int64_t>(), MPI_SUM,
				  (B.commGrid)->GetWorld());
	if (myrank == 0)
		std::cout << "sampling-based spmv est tot: " << nnzC_tot << std::endl;

	// revert back to the original orientation
	A.Transpose();
	B.Transpose();
	return nnzC_tot;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
}
#endif
|
GB_binop__land_bool.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__land_bool)
// A.*B function (eWiseMult): GB (_AemultB_08__land_bool)
// A.*B function (eWiseMult): GB (_AemultB_02__land_bool)
// A.*B function (eWiseMult): GB (_AemultB_04__land_bool)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__land_bool)
// A*D function (colscale): GB (_AxD__land_bool)
// D*A function (rowscale): GB (_DxB__land_bool)
// C+=B function (dense accum): GB (_Cdense_accumB__land_bool)
// C+=b function (dense accum): GB (_Cdense_accumb__land_bool)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_bool)
// C=scalar+B GB (_bind1st__land_bool)
// C=scalar+B' GB (_bind1st_tran__land_bool)
// C=A+scalar GB (_bind2nd__land_bool)
// C=A'+scalar GB (_bind2nd_tran__land_bool)
// C type: bool
// A type: bool
// A pattern? 0
// B type: bool
// B pattern? 0
// BinaryOp: cij = (aij && bij)
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
bool aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
bool bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x && y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_BOOL || GxB_NO_LAND_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Generated kernel: C = A+B where C, A, and B are all dense, no accumulator.
// Auto-generated from Generator/*; do not hand-edit logic.
void GB (_Cdense_ewise3_noaccum__land_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Generated kernel: C += B, sparse B accumulated into dense C.
GrB_Info GB (_Cdense_accumB__land_bool)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
// GB_DISABLE is set when this operator/type combo is disabled via
// GB_control.h; the caller then falls back to the generic kernel.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Generated kernel: C += b, scalar b accumulated into dense C.
GrB_Info GB (_Cdense_accumb__land_bool)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable (the block above always returns); kept as
// emitted by the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Generated kernel: C = A*D, scale each column of A by diagonal matrix D.
GrB_Info GB (_AxD__land_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is written in place by the column-scale template below.
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Generated kernel: C = D*B, scale each row of B by diagonal matrix D.
GrB_Info GB (_DxB__land_bool)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is written in place by the row-scale template below.
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Generated kernel: eWiseAdd C=A+B (optionally masked), op = LAND on bool.
GrB_Info GB (_AaddB__land_bool)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta are only meaningful for eWiseUnion: they stand in for
// missing entries of A and B respectively.
bool alpha_scalar ;
bool beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((bool *) alpha_scalar_in)) ;
beta_scalar = (*((bool *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult (method 08) where C is sparse/hypersparse.
GrB_Info GB (_AemultB_08__land_bool)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult (method 02), A sparse/hyper, B bitmap/full.
GrB_Info GB (_AemultB_02__land_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// LAND is commutative, so GB_BINOP_FLIP is 0 here and only the
// non-flipped branch below is compiled in.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult (method 04), M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_04__land_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult where the output C is held as a bitmap.
GrB_Info GB (_AemultB_bitmap__land_bool)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Generated kernel: Cx = op(x, Bx) with the scalar bound as the first operand.
GrB_Info GB (_bind1st__land_bool)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
bool x = (*((bool *) x_input)) ;
bool *Bx = (bool *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries absent from B's bitmap (GBB is always true if Bb is NULL)
if (!GBB (Bb, p)) continue ;
bool bij = GBX (Bx, p, false) ;
Cx [p] = (x && bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Generated kernel: Cx = op(Ax, y) with the scalar bound as the second operand.
GrB_Info GB (_bind2nd__land_bool)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
bool *Ax = (bool *) Ax_input ;
bool y = (*((bool *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries absent from A's bitmap (GBB is always true if Ab is NULL)
if (!GBB (Ab, p)) continue ;
bool aij = GBX (Ax, p, false) ;
Cx [p] = (aij && y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// cij = op (x, aij): per-entry kernel used by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x && aij) ; \
}
// Generated kernel: C = op(x, A') — transpose A and apply with bound scalar x.
GrB_Info GB (_bind1st_tran__land_bool)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent code
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// cij = op (aij, y): per-entry kernel used by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij && y) ; \
}
// Generated kernel: C = op(A', y) — transpose A and apply with bound scalar y.
GrB_Info GB (_bind2nd_tran__land_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__pair_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pair_fp64)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__pair_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__pair_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__pair_fp64)
// A*D function (colscale): GB (_AxD__pair_fp64)
// D*A function (rowscale): GB (_DxB__pair_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__pair_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__pair_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_fp64)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = 1
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = 1 ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PAIR || GxB_NO_FP64 || GxB_NO_PAIR_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Generated kernel (PAIR, fp64): C = A+B, all three matrices dense.
GrB_Info GB (_Cdense_ewise3_noaccum__pair_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Generated kernel (PAIR, fp64): C += B, sparse B into dense C.
GrB_Info GB (_Cdense_accumB__pair_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Generated kernel (PAIR, fp64): C += b, scalar b into dense C.
GrB_Info GB (_Cdense_accumb__pair_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable (the block above always returns); kept as
// emitted by the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Generated kernel (PAIR, fp64): C = A*D, column scale by diagonal D.
GrB_Info GB (_AxD__pair_fp64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is written in place by the column-scale template below.
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Generated kernel (PAIR, fp64): C = D*B, row scale by diagonal D.
GrB_Info GB (_DxB__pair_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is written in place by the row-scale template below.
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Generated kernel: eWiseAdd, C = A+B or C<M> = A+B, specialized for the
// PAIR operator on fp64.  Workspace slicings are declared here and freed
// after the template runs.
GrB_Info GB (_AaddB__pair_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspace, allocated (if needed) inside the template
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
// release the slicing workspace declared above
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult, C = A.*B or C<M> = A.*B (method 01),
// specialized for the PAIR operator on fp64.
GrB_Info GB (_AemultB_01__pair_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult (method 02), C<#> = A.*B when A is
// sparse/hyper and B is bitmap/full, specialized for PAIR on fp64.
// GB_BINOP_FLIP selects at compile time whether a runtime flip of the
// operands is required.
GrB_Info GB (_AemultB_02__pair_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult (method 03), C<M> = A.*B with M
// sparse/hyper and A, B bitmap/full, specialized for PAIR on fp64.
GrB_Info GB (_AemultB_03__pair_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult producing a bitmap C (C=A.*B, C<M>=A.*B,
// or C<!M>=A.*B), specialized for PAIR on fp64.
GrB_Info GB (_AemultB_bitmap__pair_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
// Disabled by the code generator: no bind1st kernel is emitted for this
// operator (PAIR always produces 1, regardless of its inputs, as the
// Cx [p] = 1 assignment below shows).
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap B
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
// Disabled by the code generator: no bind2nd kernel is emitted for this
// operator (PAIR ignores both operands and writes 1).
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap A
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// Disabled by the code generator: no "transpose + bind1st" kernel is
// emitted for the PAIR operator.
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any subsequent template inclusion
#undef GB_ATYPE
#define GB_ATYPE \
double
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// Disabled by the code generator: no "transpose + bind2nd" kernel is
// emitted for the PAIR operator.
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
stream.c | /*-----------------------------------------------------------------------*/
/* Program: STREAM */
/* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */
/* Original code developed by John D. McCalpin */
/* Programmers: John D. McCalpin */
/* Joe R. Zagar */
/* */
/* This program measures memory transfer rates in MB/s for simple */
/* computational kernels coded in C. */
/*-----------------------------------------------------------------------*/
/* Copyright 1991-2013: John D. McCalpin */
/*-----------------------------------------------------------------------*/
/* License: */
/* 1. You are free to use this program and/or to redistribute */
/* this program. */
/* 2. You are free to modify this program for your own use, */
/* including commercial use, subject to the publication */
/* restrictions in item 3. */
/* 3. You are free to publish results obtained from running this */
/* program, or from works that you derive from this program, */
/* with the following limitations: */
/* 3a. In order to be referred to as "STREAM benchmark results", */
/* published results must be in conformance to the STREAM */
/* Run Rules, (briefly reviewed below) published at */
/* http://www.cs.virginia.edu/stream/ref.html */
/* and incorporated herein by reference. */
/* As the copyright holder, John McCalpin retains the */
/* right to determine conformity with the Run Rules. */
/* 3b. Results based on modified source code or on runs not in */
/* accordance with the STREAM Run Rules must be clearly */
/* labelled whenever they are published. Examples of */
/* proper labelling include: */
/* "tuned STREAM benchmark results" */
/* "based on a variant of the STREAM benchmark code" */
/* Other comparable, clear, and reasonable labelling is */
/* acceptable. */
/* 3c. Submission of results to the STREAM benchmark web site */
/* is encouraged, but not required. */
/* 4. Use of this program or creation of derived works based on this */
/* program constitutes acceptance of these licensing restrictions. */
/* 5. Absolutely no warranty is expressed or implied. */
/*-----------------------------------------------------------------------*/
# include <stdio.h>
# include <unistd.h>
# include <math.h>
# include <float.h>
# include <limits.h>
# include <sys/time.h>
/*-----------------------------------------------------------------------
* INSTRUCTIONS:
*
* 1) STREAM requires different amounts of memory to run on different
* systems, depending on both the system cache size(s) and the
* granularity of the system timer.
* You should adjust the value of 'STREAM_ARRAY_SIZE' (below)
* to meet *both* of the following criteria:
* (a) Each array must be at least 4 times the size of the
* available cache memory. I don't worry about the difference
* between 10^6 and 2^20, so in practice the minimum array size
* is about 3.8 times the cache size.
* Example 1: One Xeon E3 with 8 MB L3 cache
* STREAM_ARRAY_SIZE should be >= 4 million, giving
* an array size of 30.5 MB and a total memory requirement
* of 91.5 MB.
* Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP)
* STREAM_ARRAY_SIZE should be >= 20 million, giving
* an array size of 153 MB and a total memory requirement
* of 458 MB.
* (b) The size should be large enough so that the 'timing calibration'
* output by the program is at least 20 clock-ticks.
* Example: most versions of Windows have a 10 millisecond timer
* granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds.
* If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec.
* This means the each array must be at least 1 GB, or 128M elements.
*
* Version 5.10 increases the default array size from 2 million
* elements to 10 million elements in response to the increasing
* size of L3 caches. The new default size is large enough for caches
* up to 20 MB.
* Version 5.10 changes the loop index variables from "register int"
* to "ssize_t", which allows array indices >2^32 (4 billion)
* on properly configured 64-bit systems. Additional compiler options
* (such as "-mcmodel=medium") may be required for large memory runs.
*
* Array size can be set at compile time without modifying the source
* code for the (many) compilers that support preprocessor definitions
* on the compile line. E.g.,
* gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M
* will override the default size of 10M with a new size of 100M elements
* per array.
*/
#ifndef STREAM_ARRAY_SIZE
# define STREAM_ARRAY_SIZE 10000000
#endif
/* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result
* for any iteration after the first, therefore the minimum value
* for NTIMES is 2.
* There are no rules on maximum allowable values for NTIMES, but
* values larger than the default are unlikely to noticeably
* increase the reported performance.
* NTIMES can also be set on the compile line without changing the source
* code using, for example, "-DNTIMES=7".
*/
#ifdef NTIMES
#if NTIMES<=1
# define NTIMES 10
#endif
#endif
#ifndef NTIMES
# define NTIMES 10
#endif
/* Users are allowed to modify the "OFFSET" variable, which *may* change the
* relative alignment of the arrays (though compilers may change the
* effective offset by making the arrays non-contiguous on some systems).
* Use of non-zero values for OFFSET can be especially helpful if the
* STREAM_ARRAY_SIZE is set to a value close to a large power of 2.
* OFFSET can also be set on the compile line without changing the source
* code using, for example, "-DOFFSET=56".
*/
#ifndef OFFSET
# define OFFSET 0
#endif
/*
* 3) Compile the code with optimization. Many compilers generate
* unreasonably bad code before the optimizer tightens things up.
* If the results are unreasonably good, on the other hand, the
* optimizer might be too smart for me!
*
* For a simple single-core version, try compiling with:
* cc -O stream.c -o stream
* This is known to work on many, many systems....
*
* To use multiple cores, you need to tell the compiler to obey the OpenMP
* directives in the code. This varies by compiler, but a common example is
* gcc -O -fopenmp stream.c -o stream_omp
* The environment variable OMP_NUM_THREADS allows runtime control of the
* number of threads/cores used when the resulting "stream_omp" program
* is executed.
*
* To run with single-precision variables and arithmetic, simply add
* -DSTREAM_TYPE=float
* to the compile line.
* Note that this changes the minimum array sizes required --- see (1) above.
*
* The preprocessor directive "TUNED" does not do much -- it simply causes the
* code to call separate functions to execute each kernel. Trivial versions
* of these functions are provided, but they are *not* tuned -- they just
* provide predefined interfaces to be replaced with tuned code.
*
*
* 4) Optional: Mail the results to mccalpin@cs.virginia.edu
* Be sure to include info that will help me understand:
* a) the computer hardware configuration (e.g., processor model, memory type)
* b) the compiler name/version and compilation flags
* c) any run-time information (such as OMP_NUM_THREADS)
* d) all of the output from the test case.
*
* Thanks!
*
*-----------------------------------------------------------------------*/
# define HLINE "-------------------------------------------------------------\n"
# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif
#ifndef STREAM_TYPE
#define STREAM_TYPE double
#endif
static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET],
b[STREAM_ARRAY_SIZE+OFFSET],
c[STREAM_ARRAY_SIZE+OFFSET];
static double avgtime[4] = {0}, maxtime[4] = {0},
mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};
static char *label[4] = {"Copy: ", "Scale: ",
"Add: ", "Triad: "};
static double bytes[4] = {
2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE
};
extern double mysecond();
extern void checkSTREAMresults();
#ifdef TUNED
extern void tuned_STREAM_Copy();
extern void tuned_STREAM_Scale(STREAM_TYPE scalar);
extern void tuned_STREAM_Add();
extern void tuned_STREAM_Triad(STREAM_TYPE scalar);
#endif
#ifdef _OPENMP
extern int omp_get_num_threads();
#endif
/*
 * STREAM driver: prints the run configuration, initializes the three
 * arrays, calibrates the timer, runs the four kernels (Copy, Scale,
 * Add, Triad) NTIMES times, and reports best-rate statistics.  The
 * ordering of timing calls and kernel loops is part of the benchmark
 * definition and must not be reorganized.
 */
int
main()
{
int quantum, checktick();
int BytesPerWord;
int k;
ssize_t j;
STREAM_TYPE scalar;
double t, times[4][NTIMES];
/* --- SETUP --- determine precision and check timing --- */
printf(HLINE);
printf("STREAM version $Revision: 5.10 $\n");
printf(HLINE);
BytesPerWord = sizeof(STREAM_TYPE);
printf("This system uses %d bytes per array element.\n",
BytesPerWord);
printf(HLINE);
#ifdef N
/* Older STREAM versions used -DN=...; warn that it is now ignored. */
printf("***** WARNING: ******\n");
printf(" It appears that you set the preprocessor variable N when compiling this code.\n");
printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n");
printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE);
printf("***** WARNING: ******\n");
#endif
printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, OFFSET);
printf("Memory per array = %.1f MiB (= %.1f GiB).\n",
BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0),
BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0));
printf("Total memory required = %.1f MiB (= %.1f GiB).\n",
(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.),
(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.));
printf("Each kernel will be executed %d times.\n", NTIMES);
printf(" The *best* time for each kernel (excluding the first iteration)\n");
printf(" will be used to compute the reported bandwidth.\n");
#ifdef _OPENMP
printf(HLINE);
/* report the thread count OpenMP grants (master thread only) */
#pragma omp parallel
{
#pragma omp master
{
k = omp_get_num_threads();
printf ("Number of Threads requested = %i\n",k);
}
}
#endif
#ifdef _OPENMP
/* independently count the threads that actually run */
k = 0;
#pragma omp parallel
#pragma omp atomic
k++;
printf ("Number of Threads counted = %i\n",k);
#endif
/* Get initial value for system clock. */
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
a[j] = 1.0;
b[j] = 2.0;
c[j] = 0.0;
}
printf(HLINE);
/* timer-granularity calibration; quantum is in microseconds */
if ( (quantum = checktick()) >= 1)
printf("Your clock granularity/precision appears to be "
"%d microseconds.\n", quantum);
else {
printf("Your clock granularity appears to be "
"less than one microsecond.\n");
quantum = 1;
}
/* rough estimate of per-test duration, also warms up array a[] */
t = mysecond();
#pragma omp parallel for
for (j = 0; j < STREAM_ARRAY_SIZE; j++)
a[j] = 2.0E0 * a[j];
t = 1.0E6 * (mysecond() - t);
printf("Each test below will take on the order"
" of %d microseconds.\n", (int) t );
printf(" (= %d clock ticks)\n", (int) (t/quantum) );
printf("Increase the size of the arrays if this shows that\n");
printf("you are not getting at least 20 clock ticks per test.\n");
printf(HLINE);
printf("WARNING -- The above is only a rough guideline.\n");
printf("For best results, please be sure you know the\n");
printf("precision of your system timer.\n");
printf(HLINE);
/* --- MAIN LOOP --- repeat test cases NTIMES times --- */
scalar = 3.0;
for (k=0; k<NTIMES; k++)
{
/* Copy: c = a */
times[0][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Copy();
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j];
#endif
times[0][k] = mysecond() - times[0][k];
/* Scale: b = scalar*c */
times[1][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Scale(scalar);
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
b[j] = scalar*c[j];
#endif
times[1][k] = mysecond() - times[1][k];
/* Add: c = a + b */
times[2][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Add();
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j]+b[j];
#endif
times[2][k] = mysecond() - times[2][k];
/* Triad: a = b + scalar*c */
times[3][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Triad(scalar);
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
a[j] = b[j]+scalar*c[j];
#endif
times[3][k] = mysecond() - times[3][k];
}
/* --- SUMMARY --- */
for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
{
for (j=0; j<4; j++)
{
avgtime[j] = avgtime[j] + times[j][k];
mintime[j] = MIN(mintime[j], times[j][k]);
maxtime[j] = MAX(maxtime[j], times[j][k]);
}
}
printf("Function Best Rate MB/s Avg time Min time Max time\n");
for (j=0; j<4; j++) {
avgtime[j] = avgtime[j]/(double)(NTIMES-1);
/* best rate uses the minimum time; bytes[] holds traffic per kernel */
printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j],
1.0E-06 * bytes[j]/mintime[j],
avgtime[j],
mintime[j],
maxtime[j]);
}
printf(HLINE);
/* --- Check Results --- */
checkSTREAMresults();
printf(HLINE);
return 0;
}
# define M 20
int
checktick()
{
/* Estimate the system timer granularity, in microseconds: collect M
   strictly increasing clock readings and return the smallest positive
   gap between consecutive readings. */
double samples[M];
int i;
/* gather M distinct time stamps, each at least 1 us after the previous */
for (i = 0; i < M; i++) {
double start = mysecond();
double now;
do {
now = mysecond();
} while (now - start < 1.0E-6);
samples[i] = now;
}
/* the smallest positive consecutive difference is the estimate */
int best = 1000000;
for (i = 1; i < M; i++) {
int gap = (int)(1.0E6 * (samples[i] - samples[i-1]));
best = MIN(best, MAX(gap, 0));
}
return(best);
}
/* A gettimeofday routine to give access to the wall
clock timer on most UNIX-like systems. */
#include <sys/time.h>
/*
 * Wall-clock time in seconds (microsecond resolution) via gettimeofday.
 * Fixes: the old version stored gettimeofday's return in an unused local
 * and passed a struct timezone, which POSIX marks obsolete — pass NULL.
 */
double mysecond()
{
struct timeval tp;
gettimeofday(&tp, NULL);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
#ifndef abs
#define abs(a) ((a) >= 0 ? (a) : -(a))
#endif
/*
 * Validate the benchmark results: replay the kernel sequence on scalars
 * to get the expected per-element values, then compare the arrays
 * against them with a type-dependent tolerance.
 * Fix: sizeof(...) is a size_t, so it must be printed with %zu, not %lu
 * (undefined behavior on platforms where size_t != unsigned long).
 */
void checkSTREAMresults ()
{
STREAM_TYPE aj,bj,cj,scalar;
STREAM_TYPE aSumErr,bSumErr,cSumErr;
STREAM_TYPE aAvgErr,bAvgErr,cAvgErr;
double epsilon;
ssize_t j;
int k,ierr,err;
/* reproduce initialization */
aj = 1.0;
bj = 2.0;
cj = 0.0;
/* a[] is modified during timing check */
aj = 2.0E0 * aj;
/* now execute timing loop on the scalar stand-ins */
scalar = 3.0;
for (k=0; k<NTIMES; k++)
{
cj = aj;
bj = scalar*cj;
cj = aj+bj;
aj = bj+scalar*cj;
}
/* accumulate deltas between observed and expected results */
aSumErr = 0.0;
bSumErr = 0.0;
cSumErr = 0.0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
aSumErr += abs(a[j] - aj);
bSumErr += abs(b[j] - bj);
cSumErr += abs(c[j] - cj);
// if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // MCCALPIN
}
aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
/* tolerance scales with the element type's precision */
if (sizeof(STREAM_TYPE) == 4) {
epsilon = 1.e-6;
}
else if (sizeof(STREAM_TYPE) == 8) {
epsilon = 1.e-13;
}
else {
/* %zu is the correct conversion for a size_t operand */
printf("WEIRD: sizeof(STREAM_TYPE) = %zu\n",sizeof(STREAM_TYPE));
epsilon = 1.e-6;
}
err = 0;
/* array a[] */
if (abs(aAvgErr/aj) > epsilon) {
err++;
printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
if (abs(a[j]/aj-1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,aj,a[j],abs((aj-a[j])/aAvgErr));
}
#endif
}
}
printf(" For array a[], %d errors were found.\n",ierr);
}
/* array b[] */
if (abs(bAvgErr/bj) > epsilon) {
err++;
printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj);
printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
if (abs(b[j]/bj-1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,bj,b[j],abs((bj-b[j])/bAvgErr));
}
#endif
}
}
printf(" For array b[], %d errors were found.\n",ierr);
}
/* array c[] */
if (abs(cAvgErr/cj) > epsilon) {
err++;
printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj);
printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
if (abs(c[j]/cj-1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,cj,c[j],abs((cj-c[j])/cAvgErr));
}
#endif
}
}
printf(" For array c[], %d errors were found.\n",ierr);
}
if (err == 0) {
printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon);
}
#ifdef VERBOSE
printf ("Results Validation Verbose Results: \n");
printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj);
printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]);
printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj));
#endif
}
#ifdef TUNED
/* stubs for "tuned" versions of the kernels */
/* "tuned" Copy stub: c[i] = a[i] (replace with a tuned kernel). */
void tuned_STREAM_Copy()
{
ssize_t idx;
#pragma omp parallel for
for (idx = 0; idx < STREAM_ARRAY_SIZE; idx++)
c[idx] = a[idx];
}
/* "tuned" Scale stub: b[i] = scalar * c[i] (replace with a tuned kernel). */
void tuned_STREAM_Scale(STREAM_TYPE scalar)
{
ssize_t idx;
#pragma omp parallel for
for (idx = 0; idx < STREAM_ARRAY_SIZE; idx++)
b[idx] = scalar*c[idx];
}
/* "tuned" Add stub: c[i] = a[i] + b[i] (replace with a tuned kernel). */
void tuned_STREAM_Add()
{
ssize_t idx;
#pragma omp parallel for
for (idx = 0; idx < STREAM_ARRAY_SIZE; idx++)
c[idx] = a[idx]+b[idx];
}
/* "tuned" Triad stub: a[i] = b[i] + scalar * c[i] (replace with a tuned kernel). */
void tuned_STREAM_Triad(STREAM_TYPE scalar)
{
ssize_t idx;
#pragma omp parallel for
for (idx = 0; idx < STREAM_ARRAY_SIZE; idx++)
a[idx] = b[idx]+scalar*c[idx];
}
/* end of stubs for the "tuned" versions of the kernels */
#endif
|
DenseAffine.h | // --------------------------------------------------------------------------
// Binary Brain -- binary neural net framework
//
// Copyright (C) 2018 by Ryuji Fuchikami
// https://github.com/ryuz
// ryuji.fuchikami@nifty.com
// --------------------------------------------------------------------------
#pragma once
#include <random>
#include "bb/DataType.h"
#include "bb/Model.h"
#ifdef BB_WITH_CUDA
#include "cuda_runtime.h"
#include "cublas_v2.h"
#include "bbcu/bbcu.h"
#endif
namespace bb {
// Affine (fully-connected) layer
template <typename T = float>
class DenseAffine : public Model
{
using _super = Model;
public:
// Model/object identification used by the framework's serialization
// and logging; the object name appends the value type (e.g. "_fp32").
static inline std::string ModelName(void) { return "DenseAffine"; }
static inline std::string ObjectName(void){ return ModelName() + "_" + DataType<T>::Name(); }
std::string GetModelName(void) const override { return ModelName(); }
std::string GetObjectName(void) const override { return ObjectName(); }
protected:
bool m_host_only = false;
bool m_binary_mode = false;
bool m_backward_break = false;
T m_initialize_std = (T)0.01;
std::string m_initializer = "";
std::mt19937_64 m_mt;
index_t m_input_node_size = 0;
indices_t m_input_shape;
index_t m_output_node_size = 0;
indices_t m_output_shape;
std::shared_ptr<Tensor> m_W;
std::shared_ptr<Tensor> m_b;
std::shared_ptr<Tensor> m_dW;
std::shared_ptr<Tensor> m_db;
bool m_cublasEnable = false;
#ifdef BB_WITH_CUDA
cublasHandle_t m_cublasHandle;
#endif
public:
// Construction parameters for DenseAffine::Create().
struct create_t
{
indices_t output_shape;      // shape of the layer output
T initialize_std = (T)0.01;  // std-dev for normal weight init
std::string initializer = ""; // "he" / "xavier" / "normal" / "uniform" / "" (default uniform)
std::uint64_t seed = 1;      // RNG seed for weight initialization
};
protected:
// Construct from a create_t: allocates parameter/gradient tensors,
// tries to acquire a cuBLAS handle (GPU path enabled only on success),
// and records the output shape.  Input shape is deferred to
// SetInputShape(), so W/b are not sized here.
DenseAffine(create_t const &create)
{
m_W = std::make_shared<Tensor>();
m_b = std::make_shared<Tensor>();
m_dW = std::make_shared<Tensor>();
m_db = std::make_shared<Tensor>();
#ifdef BB_WITH_CUDA
// cuBLAS is optional; fall back to the CPU path if creation fails
if ( cublasCreate(&m_cublasHandle) == CUBLAS_STATUS_SUCCESS ) {
m_cublasEnable = true;
}
#endif
// BB_ASSERT(!create.output_shape.empty());
m_initialize_std = create.initialize_std;
m_initializer = create.initializer;
m_mt.seed(create.seed);
m_output_shape = create.output_shape;
m_output_node_size = CalcShapeSize(m_output_shape);
}
// Runtime command dispatch: accepts "binary <bool>", "host_only <bool>"
// and "backward_break <bool>" after delegating to the base class.
void CommandProc(std::vector<std::string> args)
{
_super::CommandProc(args);
// binary-mode setting
if ( args.size() == 2 && args[0] == "binary" )
{
m_binary_mode = EvalBool(args[1]);
}
// host-only mode setting (disables the CUDA path)
if (args.size() == 2 && args[0] == "host_only")
{
m_host_only = EvalBool(args[1]);
}
// when set, Backward() short-circuits and propagates nothing
if (args.size() == 2 && args[0] == "backward_break")
{
m_backward_break = EvalBool(args[1]);
}
}
public:
// Destructor: releases the cuBLAS handle if one was acquired.
~DenseAffine() {
#ifdef BB_WITH_CUDA
if ( m_cublasEnable ) {
BB_CUBLAS_SAFE_CALL(cublasDestroy(m_cublasHandle));
m_cublasEnable = false;
}
#endif
}
// Factory overloads: full create_t, output shape only, flat output node
// count, and all-defaults.  The constructor is protected, so these are
// the only way to build instances.
static std::shared_ptr<DenseAffine> Create(create_t const &create)
{
return std::shared_ptr<DenseAffine>(new DenseAffine(create));
}
static std::shared_ptr<DenseAffine> Create(indices_t const &output_shape)
{
create_t create;
create.output_shape = output_shape;
return Create(create);
}
static std::shared_ptr<DenseAffine> Create(index_t output_node_size)
{
// 1-D output shape of the given node count
create_t create;
create.output_shape.resize(1);
create.output_shape[0] = output_node_size;
return Create(create);
}
static std::shared_ptr<DenseAffine> Create(void)
{
return Create(create_t());
}
#ifdef BB_PYBIND11
// pybind11-friendly factory: flat keyword-style arguments instead of
// a create_t struct.
static std::shared_ptr<DenseAffine> CreatePy(
indices_t output_shape,
T initialize_std = (T)0.01,
std::string initializer = "",
std::uint64_t seed = 1
)
{
create_t create;
create.output_shape = output_shape;
create.initialize_std = initialize_std;
create.initializer = initializer;
create.seed = seed;
return Create(create);
}
#endif
// Accessors for the weight (W), bias (b) and their gradients (dW, db),
// plus typed lock helpers that pin the tensors for host access.
Tensor &W(void) { return *m_W; }
Tensor const &W(void) const { return *m_W; }
Tensor &b(void) { return *m_b; }
Tensor const &b(void) const { return *m_b; }
Tensor &dW(void) { return *m_dW; }
Tensor const &dW(void) const { return *m_dW; }
Tensor &db(void) { return *m_db; }
Tensor const &db(void) const { return *m_db; }
auto lock_W(void) { return m_W->Lock<T>(); }
auto lock_W_const(void) const { return m_W->LockConst<T>(); }
auto lock_b(void) { return m_b->Lock<T>(); }
auto lock_b_const(void) const { return m_b->LockConst<T>(); }
auto lock_dW(void) { return m_dW->Lock<T>(); }
auto lock_dW_const(void) const { return m_dW->LockConst<T>(); }
auto lock_db(void) { return m_db->Lock<T>(); }
auto lock_db_const(void) const { return m_db->LockConst<T>(); }
/**
 * @brief  Set the input shape
 * @detail Sizes W/b/dW/db accordingly and (re)initializes the parameters.
 * @param  shape  the new input shape
 * @return the resulting output shape
 */
// Sets the input shape, sizes the parameter tensors accordingly, and
// (re)initializes W and b using the configured initializer.
// Returns the output shape.
indices_t SetInputShape(indices_t shape)
{
// no-op if the shape is already set to the same value
if ( shape == this->GetInputShape() ) {
return this->GetOutputShape();
}
// record the new shape
m_input_shape = shape;
m_input_node_size = CalcShapeSize(shape);
// (re)size parameters and gradients: W is (output x input), b is (output)
m_W->Resize ({m_output_node_size, m_input_node_size}, DataType<T>::type);
m_b->Resize ({m_output_node_size}, DataType<T>::type);
m_dW->Resize({m_output_node_size, m_input_node_size}, DataType<T>::type);
m_db->Resize({m_output_node_size}, DataType<T>::type);
// parameter initialization; dispatch on the initializer name
if (m_initializer == "he" || m_initializer == "He") {
// He init: std = sqrt(2 / fan_in)
m_initialize_std = (T)std::sqrt(2.0 / (double)m_input_node_size);
m_W->InitNormalDistribution(0.0, m_initialize_std, m_mt());
m_b->InitNormalDistribution(0.0, m_initialize_std, m_mt());
}
else if (m_initializer == "xavier" || m_initializer == "Xavier" ) {
// Xavier init: std = sqrt(1 / fan_in)
m_initialize_std = (T)std::sqrt(1.0 / (double)m_input_node_size);
m_W->InitNormalDistribution(0.0, m_initialize_std, m_mt());
m_b->InitNormalDistribution(0.0, m_initialize_std, m_mt());
}
else if (m_initializer == "normal" || m_initializer == "Normal" ) {
// normal init with the user-supplied std
m_W->InitNormalDistribution(0.0, m_initialize_std, m_mt());
m_b->InitNormalDistribution(0.0, m_initialize_std, m_mt());
}
else if (m_initializer == "uniform" || m_initializer == "Uniform" ) {
// uniform init with the same variance as the normal init
double k = (double)m_initialize_std * std::sqrt(3.0);
m_W->InitUniformDistribution(-k, +k, m_mt());
m_b->InitUniformDistribution(-k, +k, m_mt());
}
else {
// default: uniform in [-sqrt(1/fan_in), +sqrt(1/fan_in)]
double k = std::sqrt(1.0 / (double)m_input_node_size);
m_W->InitUniformDistribution(-k, +k, m_mt());
m_b->InitUniformDistribution(-k, +k, m_mt());
}
// gradients start at zero
m_dW->FillZero();
m_db->FillZero();
return m_output_shape;
}
/**
 * @brief  Set the output shape
 * @detail The shape may be changed freely as long as the total number
 *         of output nodes stays the same (reshape only).
 * @param  shape  the new output shape
 * @return none
 */
// Reshape the output. The documented contract is that the total number
// of OUTPUT nodes must be unchanged.
// BUGFIX: the assert previously compared against m_input_node_size,
// which rejected valid reshapes (and accepted invalid ones) whenever
// input and output node counts differ.
void SetOutputShape(indices_t const &shape)
{
BB_ASSERT(CalcShapeSize(shape) == m_output_node_size);
m_output_shape = shape;
}
/**
 * @brief  Get the input shape
 * @return the input shape
 */
// Returns the input shape set by SetInputShape (empty until then).
indices_t GetInputShape(void) const
{
return m_input_shape;
}
/**
 * @brief  Get the output shape
 * @return the output shape
 */
// Returns the current output shape.
indices_t GetOutputShape(void) const
{
return m_output_shape;
}
// Expose the trainable parameters (weights, then bias).  When the
// parameter lock is engaged, an empty set is reported instead.
Variables GetParameters(void) override
{
Variables params;
if ( this->m_parameter_lock ) {
return params;
}
params.PushBack(m_W);
params.PushBack(m_b);
return params;
}
// Expose the parameter gradients (dW, then db), mirroring
// GetParameters(); empty when the parameter lock is engaged.
Variables GetGradients(void) override
{
Variables grads;
if ( this->m_parameter_lock ) {
return grads;
}
grads.PushBack(m_dW);
grads.PushBack(m_db);
return grads;
}
// Forward pass: y = x * W^T + b (per frame).  Uses cuBLAS when the
// buffers and device are available and T is fp32; otherwise falls back
// to an OpenMP CPU loop.  During training the input is saved for
// Backward().
FrameBuffer Forward(FrameBuffer x_buf, bool train = true) override
{
// save the input for the backward pass
if ( train ) {
this->PushFrameBuffer(x_buf);
}
// type conversion to the layer's value type, if needed
if ( x_buf.GetType() != DataType<T>::type ) {
x_buf = x_buf.ConvertTo(DataType<T>::type);
}
BB_ASSERT(x_buf.GetType() == DataType<T>::type);
BB_ASSERT(x_buf.GetNodeSize() == m_input_node_size);
// if SetInputShape has not been called, set it on first use
// NOTE(review): the assert above already requires the node sizes to
// match, so this branch looks unreachable when the shape is unset —
// verify whether the assert should come after this block.
if (x_buf.GetNodeSize() != m_input_node_size) {
SetInputShape(x_buf.GetShape());
}
// allocate the output buffer
FrameBuffer y_buf(x_buf.GetFrameSize(), m_output_shape, DataType<T>::type);
#ifdef BB_WITH_CUDA
if (DataType<T>::type == BB_TYPE_FP32 && m_cublasEnable && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable())
{
// GPU path: broadcast the bias into y, then y += x * W via SGEMM
// (beta = 1 accumulates onto the bias already stored in y)
auto x_ptr = x_buf.LockDeviceMemoryConst();
auto y_ptr = y_buf.LockDeviceMemory(true);
auto W_ptr = m_W->LockDeviceMemoryConst();
auto b_ptr = m_b->LockDeviceMemoryConst();
bbcu_fp32_MatrixRowwiseSetVector
(
(float const *)b_ptr.GetAddr(),
(float *)y_ptr.GetAddr(),
(int )y_buf.GetNodeSize(),
(int )y_buf.GetFrameSize(),
(int )(y_buf.GetFrameStride() / sizeof(float))
);
float alpha = 1.0f;
float beta = 1.0f;
BB_CUBLAS_SAFE_CALL(cublasSgemm
(
m_cublasHandle,
CUBLAS_OP_N,
CUBLAS_OP_N,
(int)y_buf.GetFrameSize(),
(int)y_buf.GetNodeSize(),
(int)x_buf.GetNodeSize(),
&alpha,
(const float *)x_ptr.GetAddr(),
(int)(x_buf.GetFrameStride() / sizeof(float)),
(const float *)W_ptr.GetAddr(),
(int)x_buf.GetNodeSize(),
&beta,
(float *)y_ptr.GetAddr(),
(int)(y_buf.GetFrameStride() / sizeof(float))
));
return y_buf;
}
#endif
{
// CPU fallback: y[frame][o] = b[o] + sum_i x[frame][i] * W[o][i]
auto frame_size = x_buf.GetFrameSize();
auto x_ptr = x_buf.LockConst<T>();
auto y_ptr = y_buf.Lock<T>();
auto W_ptr = lock_W_const();
auto b_ptr = lock_b_const();
#pragma omp parallel for
for (index_t frame = 0; frame < frame_size; ++frame) {
for (index_t output_node = 0; output_node < m_output_node_size; ++output_node) {
y_ptr.Set(frame, output_node, b_ptr(output_node));
for (index_t input_node = 0; input_node < m_input_node_size; ++input_node) {
y_ptr.Add(frame, output_node, x_ptr.Get(frame, input_node) * W_ptr(output_node, input_node));
}
}
}
return y_buf;
}
}
/**
 * @brief  Backward pass: dx = dy * W, dW += dy^T * x, db += colsum(dy).
 * @param  dy_buf  gradient flowing in from the next layer
 * @return the gradient with respect to the input (dx)
 */
FrameBuffer Backward(FrameBuffer dy_buf) override
{
    if ( dy_buf.Empty() || m_backward_break ) {
        // nothing to propagate; drop the gradient buffers
        m_dW = 0;
        m_db = 0;
        return FrameBuffer();
    }

    BB_ASSERT(dy_buf.GetType() == DataType<T>::type);

    // frame count
    auto frame_size = dy_buf.GetFrameSize();

    // restore the input saved by Forward
    FrameBuffer x_buf = PopFrameBuffer();
    BB_ASSERT(x_buf.GetFrameSize() == dy_buf.GetFrameSize());

    // convert to this layer's data type if needed
    if ( x_buf.GetType() != DataType<T>::type ) {
        x_buf = x_buf.ConvertTo(DataType<T>::type);
    }

    FrameBuffer dx_buf(x_buf.GetFrameSize(), x_buf.GetShape(), DataType<T>::type);

#ifdef BB_WITH_CUDA
    if (DataType<T>::type == BB_TYPE_FP32 && m_cublasEnable && dy_buf.IsDeviceAvailable() && x_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable())
    {
        // GPU path: db via column sum, dx and dW via cuBLAS SGEMM
        auto dy_ptr = dy_buf.LockDeviceMemoryConst();
        auto x_ptr  = x_buf.LockDeviceMemoryConst();
        auto dx_ptr = dx_buf.LockDeviceMemory(true);
        auto W_ptr  = m_W->LockDeviceMemoryConst();
        auto b_ptr  = m_b->LockDeviceMemoryConst();    // unused below; lock kept — presumably ensures device residency, TODO confirm
        auto dW_ptr = m_dW->LockDeviceMemory();
        auto db_ptr = m_db->LockDeviceMemory();

        bbcu_fp32_MatrixColwiseSum
            (
                (float const *)dy_ptr.GetAddr(),
                (float *)db_ptr.GetAddr(),
                (int )dy_buf.GetNodeSize(),
                (int )dy_buf.GetFrameSize(),
                (int )(dy_buf.GetFrameStride() / sizeof(float))
            );

        float alpha = 1.0f;
        float beta  = 0.0f;
        // dx = dy * W   (beta = 0 overwrites dx)
        BB_CUBLAS_SAFE_CALL(cublasSgemm
            (
                m_cublasHandle,
                CUBLAS_OP_N,
                CUBLAS_OP_T,
                (int)dx_buf.GetFrameSize(),
                (int)dx_buf.GetNodeSize(),
                (int)dy_buf.GetNodeSize(),
                &alpha,
                (const float *)dy_ptr.GetAddr(),
                (int)(dy_buf.GetFrameStride() / sizeof(float)),
                (const float *)W_ptr.GetAddr(),
                (int)dx_buf.GetNodeSize(),
                &beta,
                (float *)dx_ptr.GetAddr(),
                (int)(dx_buf.GetFrameStride() / sizeof(float))
            ));

        // dW += x^T * dy   (beta = 1 accumulates)
        beta = 1.0f;
        BB_CUBLAS_SAFE_CALL(cublasSgemm
            (
                m_cublasHandle,
                CUBLAS_OP_T,
                CUBLAS_OP_N,
                (int)dx_buf.GetNodeSize(),
                (int)dy_buf.GetNodeSize(),
                (int)dx_buf.GetFrameSize(),
                &alpha,
                (const float *)x_ptr.GetAddr(),
                (int)(x_buf.GetFrameStride() / sizeof(float)),
                (const float *)dy_ptr.GetAddr(),
                (int)(dy_buf.GetFrameStride() / sizeof(float)),
                &beta,
                (float *)dW_ptr.GetAddr(),
                (int)dx_buf.GetNodeSize()
            ));
        return dx_buf;
    }
#endif

    {
        // CPU fallback
        dx_buf.FillZero();

        auto x_ptr  = x_buf.LockConst<T>();
        auto dy_ptr = dy_buf.LockConst<T>();
        auto dx_ptr = dx_buf.Lock<T>();
        auto W_ptr  = lock_W_const();
        auto b_ptr  = lock_b_const();   // unused below; lock kept — presumably affects memory residency, TODO confirm
        auto dW_ptr = lock_dW();
        auto db_ptr = lock_db();

        // Pass 1: dx.  Parallel over frames — each thread owns a whole
        // frame, so no two threads write the same dx element.
        #pragma omp parallel for
        for (index_t frame = 0; frame < frame_size; ++frame) {
            for (index_t output_node = 0; output_node < m_output_node_size; ++output_node) {
                auto grad = dy_ptr.Get(frame, output_node);
                for (index_t input_node = 0; input_node < m_input_node_size; ++input_node) {
                    dx_ptr.Add(frame, input_node, grad * W_ptr(output_node, input_node));
                }
            }
        }

        // Pass 2: dW and db.  Parallel over OUTPUT NODES, not frames.
        // The previous code parallelized over frames while accumulating
        // into db_ptr(output_node) and dW_ptr(output_node, input_node):
        // multiple threads performed unsynchronized read-modify-write on
        // the same elements — a data race.  Here each thread owns one row
        // of dW and one entry of db, so accumulation is race-free and
        // deterministic.
        #pragma omp parallel for
        for (index_t output_node = 0; output_node < m_output_node_size; ++output_node) {
            for (index_t frame = 0; frame < frame_size; ++frame) {
                auto grad = dy_ptr.Get(frame, output_node);
                db_ptr(output_node) += grad;
                for (index_t input_node = 0; input_node < m_input_node_size; ++input_node) {
                    dW_ptr(output_node, input_node) += grad * x_ptr.Get(frame, input_node);
                }
            }
        }
        return dx_buf;
    }
}
// シリアライズ
protected:
// Serialize this layer's state to a binary stream.
// The write order here defines the on-stream format and must stay in
// sync with LoadObjectData.
void DumpObjectData(std::ostream &os) const override
{
    // format version
    std::int64_t ver = 1;
    bb::SaveValue(os, ver);

    // base-class state first
    _super::DumpObjectData(os);

    // member fields (order is the file format)
    bb::SaveValue(os, m_host_only);
    bb::SaveValue(os, m_binary_mode);
    bb::SaveValue(os, m_initialize_std);
    bb::SaveValue(os, m_initializer);
    bb::SaveValue(os, m_input_shape);
    bb::SaveValue(os, m_output_shape);
    bb::SaveValue(os, m_cublasEnable);
    m_W->DumpObject(os);
    m_b->DumpObject(os);
}
// Deserialize this layer's state from a binary stream (inverse of
// DumpObjectData), then rebuild derived state: the cuBLAS handle, the
// cached node sizes, and the gradient buffers.
void LoadObjectData(std::istream &is) override
{
#ifdef BB_WITH_CUDA
    // release any live cuBLAS handle before m_cublasEnable is overwritten
    if ( m_cublasEnable ) {
        BB_CUBLAS_SAFE_CALL(cublasDestroy(m_cublasHandle));
        m_cublasEnable = false;
    }
#endif

    // format version (only version 1 is supported)
    std::int64_t ver;
    bb::LoadValue(is, ver);
    BB_ASSERT(ver == 1);

    // base-class state
    _super::LoadObjectData(is);

    // member fields (must match the order written by DumpObjectData)
    bb::LoadValue(is, m_host_only);
    bb::LoadValue(is, m_binary_mode);
    bb::LoadValue(is, m_initialize_std);
    bb::LoadValue(is, m_initializer);
    bb::LoadValue(is, m_input_shape);
    bb::LoadValue(is, m_output_shape);
    bb::LoadValue(is, m_cublasEnable);
    m_W->LoadObject(is);
    m_b->LoadObject(is);

    // rebuild derived state
#ifdef BB_WITH_CUDA
    // recreate the handle if the stream says cuBLAS was enabled;
    // silently fall back to the CPU path if creation fails
    if ( m_cublasEnable ) {
        if ( cublasCreate(&m_cublasHandle) != CUBLAS_STATUS_SUCCESS ) {
            m_cublasEnable = false;
        }
    }
#endif

    m_input_node_size = CalcShapeSize(m_input_shape);
    m_output_node_size = CalcShapeSize(m_output_shape);
    // gradient buffers are not serialized; resize and clear them here
    m_dW->Resize({m_output_node_size, m_input_node_size}, DataType<T>::type); m_dW->FillZero();
    m_db->Resize({m_output_node_size}, DataType<T>::type); m_db->FillZero();
}
public:
// Serialize(旧)
// Legacy serializer.  m_binary_mode is written as raw bytes rather than
// through SaveValue to remain compatible with streams produced by an
// older buggy release.
void Save(std::ostream &os) const
{
    // SaveValue(os, m_binary_mode);
    os.write((const char*)&m_binary_mode, sizeof(m_binary_mode)); // backward compatibility with a past bug
    SaveIndices(os, m_input_shape);
    SaveIndices(os, m_output_shape);
    m_W->Save(os);
    m_b->Save(os);
}
// Legacy deserializer (inverse of the legacy Save above).
// NOTE(review): unlike the cereal load() below, this does not refresh
// m_input_node_size / m_output_node_size after loading the shapes —
// confirm that callers re-derive them.
void Load(std::istream &is)
{
    // bb::LoadValue(is, m_binary_mode);
    is.read((char*)&m_binary_mode, sizeof(m_binary_mode)); // backward compatibility with a past bug
    m_input_shape = bb::LoadIndices(is);
    m_output_shape = bb::LoadIndices(is);
    m_W->Load(is);
    m_b->Load(is);
}
#ifdef BB_WITH_CEREAL
// cereal serializer: base class first, then the shape/mode members and
// the weight tensors, all as named-value pairs.
template <class Archive>
void save(Archive& archive, std::uint32_t const version) const
{
    _super::save(archive, version);
    archive(cereal::make_nvp("binary_mode", m_binary_mode));
    archive(cereal::make_nvp("input_shape", m_input_shape));
    archive(cereal::make_nvp("output_shape", m_output_shape));
    archive(cereal::make_nvp("W", *m_W));
    archive(cereal::make_nvp("b", *m_b));
}
// cereal deserializer (inverse of save above).  The cached node sizes
// are recomputed from the loaded shapes before the tensors are read.
template <class Archive>
void load(Archive& archive, std::uint32_t const version)
{
    _super::load(archive, version);
    archive(cereal::make_nvp("binary_mode", m_binary_mode));
    archive(cereal::make_nvp("input_shape", m_input_shape));
    archive(cereal::make_nvp("output_shape", m_output_shape));
    m_input_node_size = CalcShapeSize(m_input_shape);
    m_output_node_size = CalcShapeSize(m_output_shape);
    archive(cereal::make_nvp("W", *m_W));
    archive(cereal::make_nvp("b", *m_b));
}
// Write this whole layer to a cereal JSON archive under "DenseAffine".
void Save(cereal::JSONOutputArchive& archive) const
{
    archive(cereal::make_nvp("DenseAffine", *this));
}
// Read this whole layer from a cereal JSON archive under "DenseAffine".
void Load(cereal::JSONInputArchive& archive)
{
    archive(cereal::make_nvp("DenseAffine", *this));
}
#endif
};
} |
GrB_getVersion.c | //------------------------------------------------------------------------------
// GrB_getVersion: get the version number of the GraphBLAS C API standard
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// For compile-time access, use GRB_VERSION and GRB_SUBVERSION.
#include "GB.h"
GrB_Info GrB_getVersion         // runtime access to C API version number
(
    unsigned int *version,      // returns GRB_VERSION
    unsigned int *subversion    // returns GRB_SUBVERSION
)
{

    //--------------------------------------------------------------------------
    // report the C API version implemented by this library
    //--------------------------------------------------------------------------

    // either output pointer may be NULL if the caller does not want it
    if (version != NULL)
    {
        (*version) = GRB_VERSION ;
    }
    if (subversion != NULL)
    {
        (*subversion) = GRB_SUBVERSION ;
    }
    #pragma omp flush
    return (GrB_SUCCESS) ;
}
|
GB_ewise_slice.c | //------------------------------------------------------------------------------
// GB_ewise_slice: slice the entries and vectors for an ewise operation
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Constructs a set of tasks to compute C, for an element-wise operation
// (GB_add, GB_emult, and GB_mask) that operates on two input matrices,
// C=op(A,B). The mask is ignored for computing where to slice the work, but
// it is sliced once the location has been found.
// M, A, B: any sparsity structure (hypersparse, sparse, bitmap, or full).
// C: constructed as sparse or hypersparse in the caller.
#define GB_FREE_WORK \
{ \
GB_WERK_POP (Coarse, int64_t) ; \
GB_FREE_WERK (&Cwork, Cwork_size) ; \
}
#define GB_FREE_ALL \
{ \
GB_FREE_WORK ; \
GB_FREE_WERK (&TaskList, TaskList_size) ; \
}
#include "GB.h"
//------------------------------------------------------------------------------
// GB_ewise_slice
//------------------------------------------------------------------------------
GrB_Info GB_ewise_slice
(
    // output:
    GB_task_struct **p_TaskList,    // array of structs
    size_t *p_TaskList_size,        // size of TaskList
    int *p_ntasks,                  // # of tasks constructed
    int *p_nthreads,                // # of threads for eWise operation
    // input:
    const int64_t Cnvec,            // # of vectors of C
    const int64_t *restrict Ch,     // vectors of C, if hypersparse
    const int64_t *restrict C_to_M, // mapping of C to M
    const int64_t *restrict C_to_A, // mapping of C to A
    const int64_t *restrict C_to_B, // mapping of C to B
    bool Ch_is_Mh,                  // if true, then Ch == Mh; GB_add only
    const GrB_Matrix M,             // mask matrix to slice (optional)
    const GrB_Matrix A,             // matrix to slice
    const GrB_Matrix B,             // matrix to slice
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (p_TaskList != NULL) ;
    ASSERT (p_TaskList_size != NULL) ;
    ASSERT (p_ntasks != NULL) ;
    ASSERT (p_nthreads != NULL) ;
    ASSERT_MATRIX_OK (A, "A for ewise_slice", GB0) ;
    ASSERT (!GB_ZOMBIES (A)) ;
    ASSERT (!GB_JUMBLED (A)) ;
    ASSERT (!GB_PENDING (A)) ;
    ASSERT_MATRIX_OK (B, "B for ewise_slice", GB0) ;
    ASSERT (!GB_ZOMBIES (B)) ;
    ASSERT (!GB_JUMBLED (B)) ;
    ASSERT (!GB_PENDING (B)) ;
    ASSERT_MATRIX_OK_OR_NULL (M, "M for ewise_slice", GB0) ;
    ASSERT (!GB_ZOMBIES (M)) ;
    ASSERT (!GB_JUMBLED (M)) ;
    ASSERT (!GB_PENDING (M)) ;

    (*p_TaskList ) = NULL ;
    (*p_TaskList_size) = 0 ;
    (*p_ntasks ) = 0 ;
    (*p_nthreads ) = 1 ;

    // Cwork [k] holds the estimated work for vector k of C, later replaced
    // by its cumulative sum
    int64_t *restrict Cwork = NULL ; size_t Cwork_size = 0 ;
    GB_WERK_DECLARE (Coarse, int64_t) ;     // size ntasks1+1
    int ntasks1 = 0 ;

    //--------------------------------------------------------------------------
    // determine # of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;

    //--------------------------------------------------------------------------
    // allocate the initial TaskList
    //--------------------------------------------------------------------------

    // Allocate the TaskList to hold at least 2*ntask0 tasks.  It will grow
    // later, if needed.  Usually, 64*nthreads_max is enough, but in a few cases
    // fine tasks can cause this number to be exceeded.  If that occurs,
    // TaskList is reallocated.

    // When the mask is present, it is often fastest to break the work up
    // into tasks, even when nthreads_max is 1.

    GB_task_struct *restrict TaskList = NULL ; size_t TaskList_size = 0 ;
    int max_ntasks = 0 ;
    int ntasks0 = (M == NULL && nthreads_max == 1) ? 1 : (32 * nthreads_max) ;
    GB_REALLOC_TASK_WERK (TaskList, ntasks0, max_ntasks) ;

    //--------------------------------------------------------------------------
    // check for quick return for a single task
    //--------------------------------------------------------------------------

    if (Cnvec == 0 || ntasks0 == 1)
    {
        // construct a single coarse task that computes all of C
        TaskList [0].kfirst = 0 ;
        TaskList [0].klast  = Cnvec-1 ;
        (*p_TaskList ) = TaskList ;
        (*p_TaskList_size) = TaskList_size ;
        (*p_ntasks ) = (Cnvec == 0) ? 0 : 1 ;
        (*p_nthreads ) = 1 ;
        return (GrB_SUCCESS) ;
    }

    //--------------------------------------------------------------------------
    // get A, B, and M
    //--------------------------------------------------------------------------

    const int64_t vlen = A->vlen ;
    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ai = A->i ;
    const int64_t *restrict Bp = B->p ;
    const int64_t *restrict Bi = B->i ;
    bool Ch_is_Ah = (Ch != NULL && A->h != NULL && Ch == A->h) ;
    bool Ch_is_Bh = (Ch != NULL && B->h != NULL && Ch == B->h) ;

    const int64_t *restrict Mp = NULL ;
    const int64_t *restrict Mi = NULL ;
    bool M_is_hyper = GB_IS_HYPERSPARSE (M) ;
    if (M != NULL)
    {
        Mp = M->p ;
        Mi = M->i ;
        // Ch_is_Mh is true if either true on input (for GB_add, which denotes
        // that Ch is a deep copy of M->h), or if Ch is a shallow copy of M->h.
        Ch_is_Mh = Ch_is_Mh || (Ch != NULL && M_is_hyper && Ch == M->h) ;
    }

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    Cwork = GB_MALLOC_WERK (Cnvec+1, int64_t, &Cwork_size) ;
    if (Cwork == NULL)
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // compute an estimate of the work for each vector of C
    //--------------------------------------------------------------------------

    int nthreads_for_Cwork = GB_nthreads (Cnvec, chunk, nthreads_max) ;

    int64_t k ;
    #pragma omp parallel for num_threads(nthreads_for_Cwork) schedule(static)
    for (k = 0 ; k < Cnvec ; k++)
    {

        //----------------------------------------------------------------------
        // get the C(:,j) vector
        //----------------------------------------------------------------------

        int64_t j = GBH (Ch, k) ;

        //----------------------------------------------------------------------
        // get the corresponding vector of A
        //----------------------------------------------------------------------

        int64_t kA ;
        if (C_to_A != NULL)
        {
            // A is hypersparse and the C_to_A mapping has been created
            ASSERT (GB_IS_HYPERSPARSE (A)) ;
            kA = C_to_A [k] ;
            ASSERT (kA >= -1 && kA < A->nvec) ;
            if (kA >= 0)
            {
                ASSERT (j == GBH (A->h, kA)) ;
            }
        }
        else if (Ch_is_Ah)
        {
            // A is hypersparse, but Ch is a shallow copy of A->h
            ASSERT (GB_IS_HYPERSPARSE (A)) ;
            kA = k ;
            ASSERT (j == A->h [kA]) ;
        }
        else
        {
            // A is sparse, bitmap, or full
            ASSERT (!GB_IS_HYPERSPARSE (A)) ;
            kA = j ;
        }

        //----------------------------------------------------------------------
        // get the corresponding vector of B
        //----------------------------------------------------------------------

        int64_t kB ;
        if (C_to_B != NULL)
        {
            // B is hypersparse and the C_to_B mapping has been created
            ASSERT (GB_IS_HYPERSPARSE (B)) ;
            kB = C_to_B [k] ;
            ASSERT (kB >= -1 && kB < B->nvec) ;
            if (kB >= 0)
            {
                ASSERT (j == GBH (B->h, kB)) ;
            }
        }
        else if (Ch_is_Bh)
        {
            // B is hypersparse, but Ch is a shallow copy of B->h
            ASSERT (GB_IS_HYPERSPARSE (B)) ;
            kB = k ;
            ASSERT (j == B->h [kB]) ;
        }
        else
        {
            // B is sparse, bitmap, or full
            ASSERT (!GB_IS_HYPERSPARSE (B)) ;
            kB = j ;
        }

        //----------------------------------------------------------------------
        // estimate the work for C(:,j)
        //----------------------------------------------------------------------

        // kA or kB < 0 means the vector is not present in that matrix; a NULL
        // Ap/Bp means the matrix is bitmap or full, with vlen entries per
        // vector.  The +1 ensures even all-empty vectors carry nonzero work.
        ASSERT (kA >= -1 && kA < A->nvec) ;
        ASSERT (kB >= -1 && kB < B->nvec) ;
        int64_t aknz = (kA < 0) ? 0 :
            ((Ap == NULL) ? vlen : (Ap [kA+1] - Ap [kA])) ;
        int64_t bknz = (kB < 0) ? 0 :
            ((Bp == NULL) ? vlen : (Bp [kB+1] - Bp [kB])) ;
        Cwork [k] = aknz + bknz + 1 ;
    }

    //--------------------------------------------------------------------------
    // replace Cwork with its cumulative sum
    //--------------------------------------------------------------------------

    GB_cumsum (Cwork, Cnvec, NULL, nthreads_for_Cwork, Context) ;
    double cwork = (double) Cwork [Cnvec] ;

    //--------------------------------------------------------------------------
    // determine # of threads and tasks for the eWise operation
    //--------------------------------------------------------------------------

    int nthreads = GB_nthreads (cwork, chunk, nthreads_max) ;
    ntasks0 = (M == NULL && nthreads == 1) ? 1 : (32 * nthreads) ;
    double target_task_size = cwork / (double) (ntasks0) ;
    target_task_size = GB_IMAX (target_task_size, chunk) ;
    ntasks1 = cwork / target_task_size ;
    ntasks1 = GB_IMAX (ntasks1, 1) ;

    //--------------------------------------------------------------------------
    // slice the work into coarse tasks
    //--------------------------------------------------------------------------

    GB_WERK_PUSH (Coarse, ntasks1 + 1, int64_t) ;
    if (Coarse == NULL)
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }
    // partition the cumulative work in Cwork into ntasks1 roughly-equal
    // slices; Coarse [t] is the first vector of slice t
    GB_pslice (Coarse, Cwork, Cnvec, ntasks1, false) ;

    //--------------------------------------------------------------------------
    // construct all tasks, both coarse and fine
    //--------------------------------------------------------------------------

    int ntasks = 0 ;

    for (int t = 0 ; t < ntasks1 ; t++)
    {

        //----------------------------------------------------------------------
        // coarse task computes C (:,k:klast)
        //----------------------------------------------------------------------

        int64_t k = Coarse [t] ;
        int64_t klast = Coarse [t+1] - 1 ;

        if (k >= Cnvec)
        {

            //------------------------------------------------------------------
            // all tasks have been constructed
            //------------------------------------------------------------------

            break ;

        }
        else if (k < klast)
        {

            //------------------------------------------------------------------
            // coarse task has 2 or more vectors
            //------------------------------------------------------------------

            // This is a non-empty coarse-grain task that does two or more
            // entire vectors of C, vectors k:klast, inclusive.
            GB_REALLOC_TASK_WERK (TaskList, ntasks + 1, max_ntasks) ;
            TaskList [ntasks].kfirst = k ;
            TaskList [ntasks].klast  = klast ;
            ntasks++ ;

        }
        else
        {

            //------------------------------------------------------------------
            // coarse task has 0 or 1 vectors
            //------------------------------------------------------------------

            // As a coarse-grain task, this task is empty or does a single
            // vector, k.  Vector k must be removed from the work done by this
            // and any other coarse-grain task, and split into one or more
            // fine-grain tasks.

            for (int tt = t ; tt < ntasks1 ; tt++)
            {
                // remove k from the initial slice tt
                if (Coarse [tt] == k)
                {
                    // remove k from task tt
                    Coarse [tt] = k+1 ;
                }
                else
                {
                    // break, k not in task tt
                    break ;
                }
            }

            //------------------------------------------------------------------
            // get the vector of C
            //------------------------------------------------------------------

            int64_t j = GBH (Ch, k) ;

            //------------------------------------------------------------------
            // get the corresponding vector of A
            //------------------------------------------------------------------

            int64_t kA ;
            if (C_to_A != NULL)
            {
                // A is hypersparse and the C_to_A mapping has been created
                ASSERT (GB_IS_HYPERSPARSE (A)) ;
                kA = C_to_A [k] ;
            }
            else if (Ch_is_Ah)
            {
                // A is hypersparse, but Ch is a shallow copy of A->h
                ASSERT (GB_IS_HYPERSPARSE (A)) ;
                kA = k ;
            }
            else
            {
                // A is sparse, bitmap, or full
                ASSERT (!GB_IS_HYPERSPARSE (A)) ;
                kA = j ;
            }
            int64_t pA_start = (kA < 0) ? (-1) : GBP (Ap, kA, vlen) ;
            int64_t pA_end   = (kA < 0) ? (-1) : GBP (Ap, kA+1, vlen) ;
            bool a_empty = (pA_end == pA_start) ;

            //------------------------------------------------------------------
            // get the corresponding vector of B
            //------------------------------------------------------------------

            int64_t kB ;
            if (C_to_B != NULL)
            {
                // B is hypersparse and the C_to_B mapping has been created
                ASSERT (GB_IS_HYPERSPARSE (B)) ;
                kB = C_to_B [k] ;
            }
            else if (Ch_is_Bh)
            {
                // B is hypersparse, but Ch is a shallow copy of B->h
                ASSERT (GB_IS_HYPERSPARSE (B)) ;
                kB = k ;
            }
            else
            {
                // B is sparse, bitmap, or full
                ASSERT (!GB_IS_HYPERSPARSE (B)) ;
                kB = j ;
            }
            int64_t pB_start = (kB < 0) ? (-1) : GBP (Bp, kB, vlen) ;
            int64_t pB_end   = (kB < 0) ? (-1) : GBP (Bp, kB+1, vlen) ;
            bool b_empty = (pB_end == pB_start) ;

            //------------------------------------------------------------------
            // get the corresponding vector of M, if present
            //------------------------------------------------------------------

            // M can have any sparsity structure (hyper, sparse, bitmap, full)
            int64_t pM_start = -1 ;
            int64_t pM_end   = -1 ;
            if (M != NULL)
            {
                int64_t kM ;
                if (C_to_M != NULL)
                {
                    // M is hypersparse and the C_to_M mapping has been created
                    ASSERT (GB_IS_HYPERSPARSE (M)) ;
                    kM = C_to_M [k] ;
                }
                else if (Ch_is_Mh)
                {
                    // M is hypersparse, but Ch is a copy of Mh
                    ASSERT (GB_IS_HYPERSPARSE (M)) ;
                    // Ch is a deep or shallow copy of Mh
                    kM = k ;
                }
                else
                {
                    // M is sparse, bitmap, or full
                    ASSERT (!GB_IS_HYPERSPARSE (M)) ;
                    kM = j ;
                }
                pM_start = (kM < 0) ? -1 : GBP (Mp, kM, vlen) ;
                pM_end   = (kM < 0) ? -1 : GBP (Mp, kM+1, vlen) ;
            }
            bool m_empty = (pM_end == pM_start) ;

            //------------------------------------------------------------------
            // determine the # of fine-grain tasks to create for vector k
            //------------------------------------------------------------------

            double ckwork = Cwork [k+1] - Cwork [k] ;
            int nfine = ckwork / target_task_size ;
            nfine = GB_IMAX (nfine, 1) ;

            // make the TaskList bigger, if needed
            GB_REALLOC_TASK_WERK (TaskList, ntasks + nfine, max_ntasks) ;

            //------------------------------------------------------------------
            // create the fine-grain tasks
            //------------------------------------------------------------------

            if (nfine == 1)
            {

                //--------------------------------------------------------------
                // this is a single coarse task for all of vector k
                //--------------------------------------------------------------

                TaskList [ntasks].kfirst = k ;
                TaskList [ntasks].klast  = k ;
                ntasks++ ;

            }
            else
            {

                //--------------------------------------------------------------
                // slice vector k into nfine fine tasks
                //--------------------------------------------------------------

                // first fine task starts at the top of vector k
                ASSERT (ntasks < max_ntasks) ;
                TaskList [ntasks].kfirst = k ;
                TaskList [ntasks].klast  = -1 ; // this is a fine task
                TaskList [ntasks].pM = (m_empty) ? -1 : pM_start ;
                TaskList [ntasks].pA = (a_empty) ? -1 : pA_start ;
                TaskList [ntasks].pB = (b_empty) ? -1 : pB_start ;
                TaskList [ntasks].len = 0 ;     // to be determined below
                ntasks++ ;
                int64_t ilast = 0, i = 0 ;

                for (int tfine = 1 ; tfine < nfine ; tfine++)
                {
                    // GB_slice_vector finds an index i and the matching
                    // positions pM/pA/pB so that the remaining work after i
                    // is about (nfine-tfine)/nfine of ckwork
                    double target_work = ((nfine-tfine) * ckwork) / nfine ;
                    int64_t pM, pA, pB ;
                    GB_slice_vector (&i, &pM, &pA, &pB,
                        pM_start, pM_end, Mi,
                        pA_start, pA_end, Ai,
                        pB_start, pB_end, Bi,
                        vlen, target_work) ;

                    // prior task ends at pM-1, pA-1, and pB-1
                    TaskList [ntasks-1].pM_end = pM ;
                    TaskList [ntasks-1].pA_end = pA ;
                    TaskList [ntasks-1].pB_end = pB ;

                    // prior task handles indices ilast:i-1
                    TaskList [ntasks-1].len = i - ilast ;

                    // this task starts at pM, pA, and pB
                    ASSERT (ntasks < max_ntasks) ;
                    TaskList [ntasks].kfirst = k ;
                    TaskList [ntasks].klast  = -1 ;     // this is a fine task
                    TaskList [ntasks].pM = pM ;
                    TaskList [ntasks].pA = pA ;
                    TaskList [ntasks].pB = pB ;

                    // advance to the next task
                    ntasks++ ;
                    ilast = i ;
                }

                // Terminate the last fine task.
                ASSERT (ntasks <= max_ntasks) ;
                TaskList [ntasks-1].pM_end = (m_empty) ? -1 : pM_end ;
                TaskList [ntasks-1].pA_end = (a_empty) ? -1 : pA_end ;
                TaskList [ntasks-1].pB_end = (b_empty) ? -1 : pB_end ;
                TaskList [ntasks-1].len = vlen - i ;
            }
        }
    }

    ASSERT (ntasks <= max_ntasks) ;

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORK ;
    (*p_TaskList ) = TaskList ;
    (*p_TaskList_size) = TaskList_size ;
    (*p_ntasks ) = ntasks ;
    (*p_nthreads ) = nthreads ;
    return (GrB_SUCCESS) ;
}
|
GB_unaryop__ainv_fp64_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_fp64_int32
// op(A') function: GB_tran__ainv_fp64_int32
// C type: double
// A type: int32_t
// cast: double cij = (double) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
double z = (double) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP64 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = op (cast (Ax [p])) for all p in 0..anz-1.
// (Auto-generated file: the operator and types are fixed by the GB_*
// macros defined above; here, Cx [p] = -(double) Ax [p].)
GrB_Info GB_unop__ainv_fp64_int32
(
    double *Cx,         // Cx and Ax may be aliased
    int32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // one flat, statically-scheduled pass over all anz entries; the
    // operation is elementwise so aliasing Cx == Ax is safe
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator.
// The kernel body is the shared template GB_unaryop_transpose.c,
// specialized via the GB_* macros defined above.
GrB_Info GB_tran__ainv_fp64_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolution_3x3_pack1to4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// conv3x3s1_pack1to4: 3x3 convolution, stride 1, input elempack=1, output elempack=4.
// top_blob is pre-filled with the bias, then each input channel's contribution is
// accumulated in place. Kernel layout per (output, input) channel pair: 9 taps *
// 4 output lanes = 36 floats (k0/k1 advance by 9*4 per input channel).
// NOTE(review): input rows are assumed to be outw+2 columns wide (valid 3x3,
// stride 1) — the `+= 2` row-tail skips match that layout; confirm against caller.
static void conv3x3s1_pack1to4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* bias = _bias;
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
// aarch64 path: process output channels in pairs so each loaded input row is
// reused by two sets of accumulators (out0 and out1).
nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p + 1);
// bias is stored 4 floats per output channel (elempack=4)
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
float32x4_t _bias1 = bias ? vld1q_f32((const float*)bias + (p + 1) * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
out1.fill(_bias1);
const float* k0 = kernel.channel(p);
const float* k1 = kernel.channel(p + 1);
// accumulate the contribution of every input channel
for (int q = 0; q < inch; q++)
{
float* outptr0 = out0;
float* outptr1 = out1;
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
// 3x3 kernel taps for output channel p (suffix _0) ...
float32x4_t _k00_0 = vld1q_f32(k0);
float32x4_t _k01_0 = vld1q_f32(k0 + 4);
float32x4_t _k02_0 = vld1q_f32(k0 + 8);
float32x4_t _k10_0 = vld1q_f32(k0 + 12);
float32x4_t _k11_0 = vld1q_f32(k0 + 16);
float32x4_t _k12_0 = vld1q_f32(k0 + 20);
float32x4_t _k20_0 = vld1q_f32(k0 + 24);
float32x4_t _k21_0 = vld1q_f32(k0 + 28);
float32x4_t _k22_0 = vld1q_f32(k0 + 32);
// ... and for output channel p+1 (suffix _1)
float32x4_t _k00_1 = vld1q_f32(k1);
float32x4_t _k01_1 = vld1q_f32(k1 + 4);
float32x4_t _k02_1 = vld1q_f32(k1 + 8);
float32x4_t _k10_1 = vld1q_f32(k1 + 12);
float32x4_t _k11_1 = vld1q_f32(k1 + 16);
float32x4_t _k12_1 = vld1q_f32(k1 + 20);
float32x4_t _k20_1 = vld1q_f32(k1 + 24);
float32x4_t _k21_1 = vld1q_f32(k1 + 28);
float32x4_t _k22_1 = vld1q_f32(k1 + 32);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
// 4 output pixels per iteration: v24-v27 accumulate out0, v28-v31 out1;
// v0/v1 and v2/v3 hold sliding windows of rows r0/r1/r2.
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0] \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%1] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n"
"ld1 {v1.2s}, [%2] \n"
"fmla v24.4s, %10.4s, v0.s[0] \n"
"fmla v25.4s, %10.4s, v0.s[1] \n"
"fmla v26.4s, %10.4s, v0.s[2] \n"
"fmla v27.4s, %10.4s, v0.s[3] \n"
"fmla v28.4s, %19.4s, v0.s[0] \n"
"fmla v29.4s, %19.4s, v0.s[1] \n"
"fmla v30.4s, %19.4s, v0.s[2] \n"
"fmla v31.4s, %19.4s, v0.s[3] \n"
"fmla v24.4s, %11.4s, v0.s[1] \n"
"fmla v25.4s, %11.4s, v0.s[2] \n"
"fmla v26.4s, %11.4s, v0.s[3] \n"
"fmla v27.4s, %11.4s, v1.s[0] \n"
"fmla v28.4s, %20.4s, v0.s[1] \n"
"fmla v29.4s, %20.4s, v0.s[2] \n"
"fmla v30.4s, %20.4s, v0.s[3] \n"
"fmla v31.4s, %20.4s, v1.s[0] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v2.4s}, [%3], #16 \n"
"ld1 {v3.2s}, [%3] \n"
"fmla v24.4s, %12.4s, v0.s[2] \n"
"fmla v25.4s, %12.4s, v0.s[3] \n"
"fmla v26.4s, %12.4s, v1.s[0] \n"
"fmla v27.4s, %12.4s, v1.s[1] \n"
"fmla v28.4s, %21.4s, v0.s[2] \n"
"fmla v29.4s, %21.4s, v0.s[3] \n"
"fmla v30.4s, %21.4s, v1.s[0] \n"
"fmla v31.4s, %21.4s, v1.s[1] \n"
"fmla v24.4s, %13.4s, v2.s[0] \n"
"fmla v25.4s, %13.4s, v2.s[1] \n"
"fmla v26.4s, %13.4s, v2.s[2] \n"
"fmla v27.4s, %13.4s, v2.s[3] \n"
"fmla v28.4s, %22.4s, v2.s[0] \n"
"fmla v29.4s, %22.4s, v2.s[1] \n"
"fmla v30.4s, %22.4s, v2.s[2] \n"
"fmla v31.4s, %22.4s, v2.s[3] \n"
"fmla v24.4s, %14.4s, v2.s[1] \n"
"fmla v25.4s, %14.4s, v2.s[2] \n"
"fmla v26.4s, %14.4s, v2.s[3] \n"
"fmla v27.4s, %14.4s, v3.s[0] \n"
"fmla v28.4s, %23.4s, v2.s[1] \n"
"fmla v29.4s, %23.4s, v2.s[2] \n"
"fmla v30.4s, %23.4s, v2.s[3] \n"
"fmla v31.4s, %23.4s, v3.s[0] \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v0.4s}, [%4], #16 \n"
"ld1 {v1.2s}, [%4] \n"
"fmla v24.4s, %15.4s, v2.s[2] \n"
"fmla v25.4s, %15.4s, v2.s[3] \n"
"fmla v26.4s, %15.4s, v3.s[0] \n"
"fmla v27.4s, %15.4s, v3.s[1] \n"
"fmla v28.4s, %24.4s, v2.s[2] \n"
"fmla v29.4s, %24.4s, v2.s[3] \n"
"fmla v30.4s, %24.4s, v3.s[0] \n"
"fmla v31.4s, %24.4s, v3.s[1] \n"
"fmla v24.4s, %16.4s, v0.s[0] \n"
"fmla v25.4s, %16.4s, v0.s[1] \n"
"fmla v26.4s, %16.4s, v0.s[2] \n"
"fmla v27.4s, %16.4s, v0.s[3] \n"
"fmla v28.4s, %25.4s, v0.s[0] \n"
"fmla v29.4s, %25.4s, v0.s[1] \n"
"fmla v30.4s, %25.4s, v0.s[2] \n"
"fmla v31.4s, %25.4s, v0.s[3] \n"
"fmla v24.4s, %17.4s, v0.s[1] \n"
"fmla v25.4s, %17.4s, v0.s[2] \n"
"fmla v26.4s, %17.4s, v0.s[3] \n"
"fmla v27.4s, %17.4s, v1.s[0] \n"
"fmla v28.4s, %26.4s, v0.s[1] \n"
"fmla v29.4s, %26.4s, v0.s[2] \n"
"fmla v30.4s, %26.4s, v0.s[3] \n"
"fmla v31.4s, %26.4s, v1.s[0] \n"
"fmla v24.4s, %18.4s, v0.s[2] \n"
"fmla v25.4s, %18.4s, v0.s[3] \n"
"fmla v26.4s, %18.4s, v1.s[0] \n"
"fmla v27.4s, %18.4s, v1.s[1] \n"
"fmla v28.4s, %27.4s, v0.s[2] \n"
"fmla v29.4s, %27.4s, v0.s[3] \n"
"fmla v30.4s, %27.4s, v1.s[0] \n"
"fmla v31.4s, %27.4s, v1.s[1] \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%1], #64 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0),      // %2
"=r"(r1),      // %3
"=r"(r2)       // %4
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00_0), // %10
"w"(_k01_0), // %11
"w"(_k02_0), // %12
"w"(_k10_0), // %13
"w"(_k11_0), // %14
"w"(_k12_0), // %15
"w"(_k20_0), // %16
"w"(_k21_0), // %17
"w"(_k22_0), // %18
"w"(_k00_1), // %19
"w"(_k01_1), // %20
"w"(_k02_1), // %21
"w"(_k10_1), // %22
"w"(_k11_1), // %23
"w"(_k12_1), // %24
"w"(_k20_1), // %25
"w"(_k21_1), // %26
"w"(_k22_1)  // %27
: "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
// 2 output pixels per iteration
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v24.4s, v25.4s}, [%0] \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v26.4s, v27.4s}, [%1] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2] \n"
"add %2, %2, #8 \n"
"fmla v24.4s, %10.4s, v0.s[0] \n"
"fmla v25.4s, %10.4s, v0.s[1] \n"
"fmla v26.4s, %19.4s, v0.s[0] \n"
"fmla v27.4s, %19.4s, v0.s[1] \n"
"fmla v24.4s, %11.4s, v0.s[1] \n"
"fmla v25.4s, %11.4s, v0.s[2] \n"
"fmla v26.4s, %20.4s, v0.s[1] \n"
"fmla v27.4s, %20.4s, v0.s[2] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v1.4s}, [%3] \n"
"fmla v24.4s, %12.4s, v0.s[2] \n"
"fmla v25.4s, %12.4s, v0.s[3] \n"
"fmla v26.4s, %21.4s, v0.s[2] \n"
"fmla v27.4s, %21.4s, v0.s[3] \n"
"add %3, %3, #8 \n"
"fmla v24.4s, %13.4s, v1.s[0] \n"
"fmla v25.4s, %13.4s, v1.s[1] \n"
"fmla v26.4s, %22.4s, v1.s[0] \n"
"fmla v27.4s, %22.4s, v1.s[1] \n"
"fmla v24.4s, %14.4s, v1.s[1] \n"
"fmla v25.4s, %14.4s, v1.s[2] \n"
"fmla v26.4s, %23.4s, v1.s[1] \n"
"fmla v27.4s, %23.4s, v1.s[2] \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v0.4s}, [%4] \n"
"fmla v24.4s, %15.4s, v1.s[2] \n"
"fmla v25.4s, %15.4s, v1.s[3] \n"
"fmla v26.4s, %24.4s, v1.s[2] \n"
"fmla v27.4s, %24.4s, v1.s[3] \n"
"add %4, %4, #8 \n"
"fmla v24.4s, %16.4s, v0.s[0] \n"
"fmla v25.4s, %16.4s, v0.s[1] \n"
"fmla v26.4s, %25.4s, v0.s[0] \n"
"fmla v27.4s, %25.4s, v0.s[1] \n"
"fmla v24.4s, %17.4s, v0.s[1] \n"
"fmla v25.4s, %17.4s, v0.s[2] \n"
"fmla v26.4s, %26.4s, v0.s[1] \n"
"fmla v27.4s, %26.4s, v0.s[2] \n"
"fmla v24.4s, %18.4s, v0.s[2] \n"
"fmla v25.4s, %18.4s, v0.s[3] \n"
"fmla v26.4s, %27.4s, v0.s[2] \n"
"fmla v27.4s, %27.4s, v0.s[3] \n"
"st1 {v24.4s, v25.4s}, [%0], #32 \n"
"st1 {v26.4s, v27.4s}, [%1], #32 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0),      // %2
"=r"(r1),      // %3
"=r"(r2)       // %4
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00_0), // %10
"w"(_k01_0), // %11
"w"(_k02_0), // %12
"w"(_k10_0), // %13
"w"(_k11_0), // %14
"w"(_k12_0), // %15
"w"(_k20_0), // %16
"w"(_k21_0), // %17
"w"(_k22_0), // %18
"w"(_k00_1), // %19
"w"(_k01_1), // %20
"w"(_k02_1), // %21
"w"(_k10_1), // %22
"w"(_k11_1), // %23
"w"(_k12_1), // %24
"w"(_k20_1), // %25
"w"(_k21_1), // %26
"w"(_k22_1)  // %27
: "memory", "v0", "v1", "v24", "v25", "v26", "v27");
}
// leftover pixels, one at a time via intrinsics
for (; j < outw; j++)
{
float32x4_t _sum00 = vld1q_f32(outptr0);
float32x4_t _sum10 = vld1q_f32(outptr1);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r2 = vld1q_f32(r2);
_sum00 = vfmaq_laneq_f32(_sum00, _k00_0, _r0, 0);
_sum00 = vfmaq_laneq_f32(_sum00, _k01_0, _r0, 1);
_sum00 = vfmaq_laneq_f32(_sum00, _k02_0, _r0, 2);
_sum00 = vfmaq_laneq_f32(_sum00, _k10_0, _r1, 0);
_sum00 = vfmaq_laneq_f32(_sum00, _k11_0, _r1, 1);
_sum00 = vfmaq_laneq_f32(_sum00, _k12_0, _r1, 2);
_sum00 = vfmaq_laneq_f32(_sum00, _k20_0, _r2, 0);
_sum00 = vfmaq_laneq_f32(_sum00, _k21_0, _r2, 1);
_sum00 = vfmaq_laneq_f32(_sum00, _k22_0, _r2, 2);
_sum10 = vfmaq_laneq_f32(_sum10, _k00_1, _r0, 0);
_sum10 = vfmaq_laneq_f32(_sum10, _k01_1, _r0, 1);
_sum10 = vfmaq_laneq_f32(_sum10, _k02_1, _r0, 2);
_sum10 = vfmaq_laneq_f32(_sum10, _k10_1, _r1, 0);
_sum10 = vfmaq_laneq_f32(_sum10, _k11_1, _r1, 1);
_sum10 = vfmaq_laneq_f32(_sum10, _k12_1, _r1, 2);
_sum10 = vfmaq_laneq_f32(_sum10, _k20_1, _r2, 0);
_sum10 = vfmaq_laneq_f32(_sum10, _k21_1, _r2, 1);
_sum10 = vfmaq_laneq_f32(_sum10, _k22_1, _r2, 2);
vst1q_f32(outptr0, _sum00);
vst1q_f32(outptr1, _sum10);
r0 += 1;
r1 += 1;
r2 += 1;
outptr0 += 4;
outptr1 += 4;
}
// skip the 2 extra right-border columns of the input row (3x3 window, stride 1)
r0 += 2;
r1 += 2;
r2 += 2;
}
// advance to the next input channel's 3x3 kernel block (9 taps * 4 lanes)
k0 += 9 * 4;
k1 += 9 * 4;
}
}
#endif // __ARM_NEON && __aarch64__
// remaining (or, on 32-bit ARM, all) output channels one at a time
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
const float* k0 = kernel.channel(p);
for (int q = 0; q < inch; q++)
{
float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0 + 4);
float32x4_t _k02 = vld1q_f32(k0 + 8);
float32x4_t _k10 = vld1q_f32(k0 + 12);
float32x4_t _k11 = vld1q_f32(k0 + 16);
float32x4_t _k12 = vld1q_f32(k0 + 20);
float32x4_t _k20 = vld1q_f32(k0 + 24);
float32x4_t _k21 = vld1q_f32(k0 + 28);
float32x4_t _k22 = vld1q_f32(k0 + 32);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
#if __aarch64__
// 8 output pixels per iteration (v24-v31 accumulators)
for (; j + 7 < outw; j += 8)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v0.4s, v1.4s}, [%1], #32 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0] \n"
"fmla v24.4s, %8.4s, v0.s[0] \n"
"fmla v25.4s, %8.4s, v0.s[1] \n"
"fmla v26.4s, %8.4s, v0.s[2] \n"
"fmla v27.4s, %8.4s, v0.s[3] \n"
"fmla v28.4s, %8.4s, v1.s[0] \n"
"fmla v29.4s, %8.4s, v1.s[1] \n"
"fmla v30.4s, %8.4s, v1.s[2] \n"
"fmla v31.4s, %8.4s, v1.s[3] \n"
"ld1 {v2.2s}, [%1] \n"
"fmla v24.4s, %9.4s, v0.s[1] \n"
"fmla v25.4s, %9.4s, v0.s[2] \n"
"fmla v26.4s, %9.4s, v0.s[3] \n"
"fmla v27.4s, %9.4s, v1.s[0] \n"
"fmla v28.4s, %9.4s, v1.s[1] \n"
"fmla v29.4s, %9.4s, v1.s[2] \n"
"fmla v30.4s, %9.4s, v1.s[3] \n"
"fmla v31.4s, %9.4s, v2.s[0] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v4.4s, v5.4s}, [%2], #32 \n"
"fmla v24.4s, %10.4s, v0.s[2] \n"
"fmla v25.4s, %10.4s, v0.s[3] \n"
"fmla v26.4s, %10.4s, v1.s[0] \n"
"fmla v27.4s, %10.4s, v1.s[1] \n"
"fmla v28.4s, %10.4s, v1.s[2] \n"
"fmla v29.4s, %10.4s, v1.s[3] \n"
"fmla v30.4s, %10.4s, v2.s[0] \n"
"fmla v31.4s, %10.4s, v2.s[1] \n"
"ld1 {v2.2s}, [%2] \n"
"fmla v24.4s, %11.4s, v4.s[0] \n"
"fmla v25.4s, %11.4s, v4.s[1] \n"
"fmla v26.4s, %11.4s, v4.s[2] \n"
"fmla v27.4s, %11.4s, v4.s[3] \n"
"fmla v28.4s, %11.4s, v5.s[0] \n"
"fmla v29.4s, %11.4s, v5.s[1] \n"
"fmla v30.4s, %11.4s, v5.s[2] \n"
"fmla v31.4s, %11.4s, v5.s[3] \n"
"fmla v24.4s, %12.4s, v4.s[1] \n"
"fmla v25.4s, %12.4s, v4.s[2] \n"
"fmla v26.4s, %12.4s, v4.s[3] \n"
"fmla v27.4s, %12.4s, v5.s[0] \n"
"fmla v28.4s, %12.4s, v5.s[1] \n"
"fmla v29.4s, %12.4s, v5.s[2] \n"
"fmla v30.4s, %12.4s, v5.s[3] \n"
"fmla v31.4s, %12.4s, v2.s[0] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4s, v1.4s}, [%3], #32 \n"
"fmla v24.4s, %13.4s, v4.s[2] \n"
"fmla v25.4s, %13.4s, v4.s[3] \n"
"fmla v26.4s, %13.4s, v5.s[0] \n"
"fmla v27.4s, %13.4s, v5.s[1] \n"
"fmla v28.4s, %13.4s, v5.s[2] \n"
"fmla v29.4s, %13.4s, v5.s[3] \n"
"fmla v30.4s, %13.4s, v2.s[0] \n"
"fmla v31.4s, %13.4s, v2.s[1] \n"
"ld1 {v2.2s}, [%3] \n"
"fmla v24.4s, %14.4s, v0.s[0] \n"
"fmla v25.4s, %14.4s, v0.s[1] \n"
"fmla v26.4s, %14.4s, v0.s[2] \n"
"fmla v27.4s, %14.4s, v0.s[3] \n"
"fmla v28.4s, %14.4s, v1.s[0] \n"
"fmla v29.4s, %14.4s, v1.s[1] \n"
"fmla v30.4s, %14.4s, v1.s[2] \n"
"fmla v31.4s, %14.4s, v1.s[3] \n"
"fmla v24.4s, %15.4s, v0.s[1] \n"
"fmla v25.4s, %15.4s, v0.s[2] \n"
"fmla v26.4s, %15.4s, v0.s[3] \n"
"fmla v27.4s, %15.4s, v1.s[0] \n"
"fmla v28.4s, %15.4s, v1.s[1] \n"
"fmla v29.4s, %15.4s, v1.s[2] \n"
"fmla v30.4s, %15.4s, v1.s[3] \n"
"fmla v31.4s, %15.4s, v2.s[0] \n"
"sub %0, %0, #64 \n"
"fmla v24.4s, %16.4s, v0.s[2] \n"
"fmla v25.4s, %16.4s, v0.s[3] \n"
"fmla v26.4s, %16.4s, v1.s[0] \n"
"fmla v27.4s, %16.4s, v1.s[1] \n"
"fmla v28.4s, %16.4s, v1.s[2] \n"
"fmla v29.4s, %16.4s, v1.s[3] \n"
"fmla v30.4s, %16.4s, v2.s[0] \n"
"fmla v31.4s, %16.4s, v2.s[1] \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0),      // %1
"=r"(r1),      // %2
"=r"(r2)       // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22)  // %16
: "memory", "v0", "v1", "v2", "v4", "v5", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
#endif // __aarch64__
// 4 output pixels per iteration
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0] \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4s}, [%1], #16 \n"
"fmla v24.4s, %8.4s, v0.s[0] \n"
"fmla v25.4s, %8.4s, v0.s[1] \n"
"fmla v26.4s, %8.4s, v0.s[2] \n"
"fmla v27.4s, %8.4s, v0.s[3] \n"
"ld1 {v1.2s}, [%1] \n"
"fmla v24.4s, %9.4s, v0.s[1] \n"
"fmla v25.4s, %9.4s, v0.s[2] \n"
"fmla v26.4s, %9.4s, v0.s[3] \n"
"fmla v27.4s, %9.4s, v1.s[0] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v2.4s}, [%2], #16 \n"
"fmla v24.4s, %10.4s, v0.s[2] \n"
"fmla v25.4s, %10.4s, v0.s[3] \n"
"fmla v26.4s, %10.4s, v1.s[0] \n"
"fmla v27.4s, %10.4s, v1.s[1] \n"
"ld1 {v3.2s}, [%2] \n"
"fmla v24.4s, %11.4s, v2.s[0] \n"
"fmla v25.4s, %11.4s, v2.s[1] \n"
"fmla v26.4s, %11.4s, v2.s[2] \n"
"fmla v27.4s, %11.4s, v2.s[3] \n"
"fmla v24.4s, %12.4s, v2.s[1] \n"
"fmla v25.4s, %12.4s, v2.s[2] \n"
"fmla v26.4s, %12.4s, v2.s[3] \n"
"fmla v27.4s, %12.4s, v3.s[0] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4s}, [%3], #16 \n"
"fmla v24.4s, %13.4s, v2.s[2] \n"
"fmla v25.4s, %13.4s, v2.s[3] \n"
"fmla v26.4s, %13.4s, v3.s[0] \n"
"fmla v27.4s, %13.4s, v3.s[1] \n"
"ld1 {v1.2s}, [%3] \n"
"fmla v24.4s, %14.4s, v0.s[0] \n"
"fmla v25.4s, %14.4s, v0.s[1] \n"
"fmla v26.4s, %14.4s, v0.s[2] \n"
"fmla v27.4s, %14.4s, v0.s[3] \n"
"fmla v24.4s, %15.4s, v0.s[1] \n"
"fmla v25.4s, %15.4s, v0.s[2] \n"
"fmla v26.4s, %15.4s, v0.s[3] \n"
"fmla v27.4s, %15.4s, v1.s[0] \n"
"fmla v24.4s, %16.4s, v0.s[2] \n"
"fmla v25.4s, %16.4s, v0.s[3] \n"
"fmla v26.4s, %16.4s, v1.s[0] \n"
"fmla v27.4s, %16.4s, v1.s[1] \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0),      // %1
"=r"(r1),      // %2
"=r"(r2)       // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22)  // %16
: "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27");
#else  // __aarch64__
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d24-d31} \n"
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1]! \n"
"vmla.f32 q12, %q8, d0[0] \n"
"vmla.f32 q13, %q8, d0[1] \n"
"vmla.f32 q14, %q8, d1[0] \n"
"vmla.f32 q15, %q8, d1[1] \n"
"vld1.f32 {d2}, [%1] \n"
"vmla.f32 q12, %q9, d0[1] \n"
"vmla.f32 q13, %q9, d1[0] \n"
"vmla.f32 q14, %q9, d1[1] \n"
"vmla.f32 q15, %q9, d2[0] \n"
"pld [%2, #128] \n"
"vld1.f32 {d4-d5}, [%2]! \n"
"vmla.f32 q12, %q10, d1[0] \n"
"vmla.f32 q13, %q10, d1[1] \n"
"vmla.f32 q14, %q10, d2[0] \n"
"vmla.f32 q15, %q10, d2[1] \n"
"vmla.f32 q12, %q11, d4[0] \n"
"vmla.f32 q13, %q11, d4[1] \n"
"vmla.f32 q14, %q11, d5[0] \n"
"vmla.f32 q15, %q11, d5[1] \n"
"vld1.f32 {d3}, [%2] \n"
"vmla.f32 q12, %q12, d4[1] \n"
"vmla.f32 q13, %q12, d5[0] \n"
"vmla.f32 q14, %q12, d5[1] \n"
"vmla.f32 q15, %q12, d3[0] \n"
"pld [%3, #128] \n"
"vld1.f32 {d0-d1}, [%3]! \n"
"vmla.f32 q12, %q13, d5[0] \n"
"vmla.f32 q13, %q13, d5[1] \n"
"vmla.f32 q14, %q13, d3[0] \n"
"vmla.f32 q15, %q13, d3[1] \n"
"vmla.f32 q12, %q14, d0[0] \n"
"vmla.f32 q13, %q14, d0[1] \n"
"vmla.f32 q14, %q14, d1[0] \n"
"vmla.f32 q15, %q14, d1[1] \n"
"vld1.f32 {d2}, [%3] \n"
"vmla.f32 q12, %q15, d0[1] \n"
"vmla.f32 q13, %q15, d1[0] \n"
"vmla.f32 q14, %q15, d1[1] \n"
"vmla.f32 q15, %q15, d2[0] \n"
"vmla.f32 q12, %q16, d1[0] \n"
"vmla.f32 q13, %q16, d1[1] \n"
"vmla.f32 q14, %q16, d2[0] \n"
"vmla.f32 q15, %q16, d2[1] \n"
"vstm %0!, {d24-d31} \n"
: "=r"(outptr0), // %0
"=r"(r0),      // %1
"=r"(r1),      // %2
"=r"(r2)       // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22)  // %16
: "memory", "q0", "q1", "q2", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
// 2 output pixels per iteration
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v24.4s, v25.4s}, [%0] \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4s}, [%1] \n"
"fmul v26.4s, %8.4s, v0.s[0] \n"
"fmul v27.4s, %8.4s, v0.s[1] \n"
"fmla v24.4s, %9.4s, v0.s[1] \n"
"fmla v25.4s, %9.4s, v0.s[2] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v1.4s}, [%2] \n"
"fmla v26.4s, %10.4s, v0.s[2] \n"
"fmla v27.4s, %10.4s, v0.s[3] \n"
"fmla v24.4s, %11.4s, v1.s[0] \n"
"fmla v25.4s, %11.4s, v1.s[1] \n"
"add %1, %1, #8 \n"
"fmla v26.4s, %12.4s, v1.s[1] \n"
"fmla v27.4s, %12.4s, v1.s[2] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4s}, [%3] \n"
"fmla v24.4s, %13.4s, v1.s[2] \n"
"fmla v25.4s, %13.4s, v1.s[3] \n"
"fmla v26.4s, %14.4s, v0.s[0] \n"
"fmla v27.4s, %14.4s, v0.s[1] \n"
"add %2, %2, #8 \n"
"fmla v24.4s, %15.4s, v0.s[1] \n"
"fmla v25.4s, %15.4s, v0.s[2] \n"
"fmla v26.4s, %16.4s, v0.s[2] \n"
"fmla v27.4s, %16.4s, v0.s[3] \n"
"add %3, %3, #8 \n"
"fadd v24.4s, v24.4s, v26.4s \n"
"fadd v25.4s, v25.4s, v27.4s \n"
"st1 {v24.4s, v25.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0),      // %1
"=r"(r1),      // %2
"=r"(r2)       // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22)  // %16
: "memory", "v0", "v1", "v24", "v25", "v26", "v27");
#else  // __aarch64__
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {d24-d27}, [%0 :128] \n"
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1] \n"
"vmul.f32 q14, %q8, d0[0] \n"
"vmul.f32 q15, %q8, d0[1] \n"
"vmla.f32 q12, %q9, d0[1] \n"
"vmla.f32 q13, %q9, d1[0] \n"
"pld [%2, #128] \n"
"vld1.f32 {d2-d3}, [%2] \n"
"vmla.f32 q14, %q10, d1[0] \n"
"vmla.f32 q15, %q10, d1[1] \n"
"vmla.f32 q12, %q11, d2[0] \n"
"vmla.f32 q13, %q11, d2[1] \n"
"add %1, %1, #8 \n"
"vmla.f32 q14, %q12, d2[1] \n"
"vmla.f32 q15, %q12, d3[0] \n"
"pld [%3, #128] \n"
"vld1.f32 {d0-d1}, [%3] \n"
"vmla.f32 q12, %q13, d3[0] \n"
"vmla.f32 q13, %q13, d3[1] \n"
"vmla.f32 q14, %q14, d0[0] \n"
"vmla.f32 q15, %q14, d0[1] \n"
"add %2, %2, #8 \n"
"vmla.f32 q12, %q15, d0[1] \n"
"vmla.f32 q13, %q15, d1[0] \n"
"vmla.f32 q14, %q16, d1[0] \n"
"vmla.f32 q15, %q16, d1[1] \n"
"add %3, %3, #8 \n"
"vadd.f32 q12, q12, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"vst1.f32 {d24-d27}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0),      // %1
"=r"(r1),      // %2
"=r"(r2)       // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22)  // %16
: "memory", "q0", "q1", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
// leftover pixels via intrinsics
for (; j < outw; j++)
{
float32x4_t _sum0 = vld1q_f32(outptr0);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r2 = vld1q_f32(r2);
#if __aarch64__
_sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2);
#else
_sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k11, vget_low_f32(_r1), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0);
#endif
vst1q_f32(outptr0, _sum0);
r0 += 1;
r1 += 1;
r2 += 1;
outptr0 += 4;
}
// skip the 2 extra right-border columns of the input row
r0 += 2;
r1 += 2;
r2 += 2;
}
// next input channel's 3x3 kernel block (9 taps * 4 lanes)
k0 += 9 * 4;
}
}
}
// conv3x3s2_pack1to4: 3x3 convolution, stride 2, input elempack=1, output elempack=4.
// Same accumulation scheme as the stride-1 variant: top_blob starts filled with the
// bias and every input channel's contribution is added in place.
static void conv3x3s2_pack1to4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// Each output row consumes 2*outw input columns (stride 2); tailstep
// (= 2*w - 2*outw) moves the row pointers from the end of the consumed span
// to the start of the input row two below.
const int tailstep = w - 2 * outw + w;
const float* bias = _bias;
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
// aarch64 path: output channels in pairs, sharing the loaded input rows
nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p + 1);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
float32x4_t _bias1 = bias ? vld1q_f32((const float*)bias + (p + 1) * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
out1.fill(_bias1);
const float* k0 = kernel.channel(p);
const float* k1 = kernel.channel(p + 1);
for (int q = 0; q < inch; q++)
{
float* outptr0 = out0;
float* outptr1 = out1;
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
float32x4_t _k00_0 = vld1q_f32(k0);
float32x4_t _k01_0 = vld1q_f32(k0 + 4);
float32x4_t _k02_0 = vld1q_f32(k0 + 8);
float32x4_t _k10_0 = vld1q_f32(k0 + 12);
float32x4_t _k11_0 = vld1q_f32(k0 + 16);
float32x4_t _k12_0 = vld1q_f32(k0 + 20);
float32x4_t _k20_0 = vld1q_f32(k0 + 24);
float32x4_t _k21_0 = vld1q_f32(k0 + 28);
float32x4_t _k22_0 = vld1q_f32(k0 + 32);
float32x4_t _k00_1 = vld1q_f32(k1);
float32x4_t _k01_1 = vld1q_f32(k1 + 4);
float32x4_t _k02_1 = vld1q_f32(k1 + 8);
float32x4_t _k10_1 = vld1q_f32(k1 + 12);
float32x4_t _k11_1 = vld1q_f32(k1 + 16);
float32x4_t _k12_1 = vld1q_f32(k1 + 20);
float32x4_t _k20_1 = vld1q_f32(k1 + 24);
float32x4_t _k21_1 = vld1q_f32(k1 + 28);
float32x4_t _k22_1 = vld1q_f32(k1 + 32);
int i = 0;
for (; i < outh; i++)
{
// asm loop handles 4 output pixels per iteration; intrinsics mop up the rest
int nn = outw >> 2;
int remain = outw & 3;
if (nn > 0)
{
asm volatile(
"0: \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%1] \n" // sum0
// r0
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4s, v1.4s}, [%3], #32 \n"
"ld1r {v4.4s}, [%3] \n"
"fmla v6.4s, %12.4s, v0.s[0] \n"
"fmla v7.4s, %12.4s, v0.s[2] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%2] \n" // sum1
"fmla v8.4s, %12.4s, v1.s[0] \n"
"fmla v9.4s, %12.4s, v1.s[2] \n"
"fmla v10.4s, %21.4s, v0.s[0] \n"
"fmla v11.4s, %21.4s, v0.s[2] \n"
"fmla v12.4s, %21.4s, v1.s[0] \n"
"fmla v13.4s, %21.4s, v1.s[2] \n"
"fmla v6.4s, %13.4s, v0.s[1] \n"
"fmla v7.4s, %13.4s, v0.s[3] \n"
"fmla v8.4s, %13.4s, v1.s[1] \n"
"fmla v9.4s, %13.4s, v1.s[3] \n"
"fmla v10.4s, %22.4s, v0.s[1] \n"
"fmla v11.4s, %22.4s, v0.s[3] \n"
"fmla v12.4s, %22.4s, v1.s[1] \n"
"fmla v13.4s, %22.4s, v1.s[3] \n"
// r1
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v2.4s, v3.4s}, [%4], #32 \n"
"ld1r {v5.4s}, [%4] \n"
"fmla v6.4s, %14.4s, v0.s[2] \n"
"fmla v7.4s, %14.4s, v1.s[0] \n"
"fmla v8.4s, %14.4s, v1.s[2] \n"
"fmla v9.4s, %14.4s, v4.s[0] \n"
"fmla v10.4s, %23.4s, v0.s[2] \n"
"fmla v11.4s, %23.4s, v1.s[0] \n"
"fmla v12.4s, %23.4s, v1.s[2] \n"
"fmla v13.4s, %23.4s, v4.s[0] \n"
"fmla v6.4s, %15.4s, v2.s[0] \n"
"fmla v7.4s, %15.4s, v2.s[2] \n"
"fmla v8.4s, %15.4s, v3.s[0] \n"
"fmla v9.4s, %15.4s, v3.s[2] \n"
"fmla v10.4s, %24.4s, v2.s[0] \n"
"fmla v11.4s, %24.4s, v2.s[2] \n"
"fmla v12.4s, %24.4s, v3.s[0] \n"
"fmla v13.4s, %24.4s, v3.s[2] \n"
"fmla v6.4s, %16.4s, v2.s[1] \n"
"fmla v7.4s, %16.4s, v2.s[3] \n"
"fmla v8.4s, %16.4s, v3.s[1] \n"
"fmla v9.4s, %16.4s, v3.s[3] \n"
"fmla v10.4s, %25.4s, v2.s[1] \n"
"fmla v11.4s, %25.4s, v2.s[3] \n"
"fmla v12.4s, %25.4s, v3.s[1] \n"
"fmla v13.4s, %25.4s, v3.s[3] \n"
// r2
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4s, v1.4s}, [%5], #32 \n"
"ld1r {v4.4s}, [%5] \n"
"fmla v6.4s, %17.4s, v2.s[2] \n"
"fmla v7.4s, %17.4s, v3.s[0] \n"
"fmla v8.4s, %17.4s, v3.s[2] \n"
"fmla v9.4s, %17.4s, v5.s[0] \n"
"fmla v10.4s, %26.4s, v2.s[2] \n"
"fmla v11.4s, %26.4s, v3.s[0] \n"
"fmla v12.4s, %26.4s, v3.s[2] \n"
"fmla v13.4s, %26.4s, v5.s[0] \n"
"fmla v6.4s, %18.4s, v0.s[0] \n"
"fmla v7.4s, %18.4s, v0.s[2] \n"
"fmla v8.4s, %18.4s, v1.s[0] \n"
"fmla v9.4s, %18.4s, v1.s[2] \n"
"fmla v10.4s, %27.4s, v0.s[0] \n"
"fmla v11.4s, %27.4s, v0.s[2] \n"
"fmla v12.4s, %27.4s, v1.s[0] \n"
"fmla v13.4s, %27.4s, v1.s[2] \n"
"fmla v6.4s, %19.4s, v0.s[1] \n"
"fmla v7.4s, %19.4s, v0.s[3] \n"
"fmla v8.4s, %19.4s, v1.s[1] \n"
"fmla v9.4s, %19.4s, v1.s[3] \n"
"fmla v10.4s, %28.4s, v0.s[1] \n"
"fmla v11.4s, %28.4s, v0.s[3] \n"
"fmla v12.4s, %28.4s, v1.s[1] \n"
"fmla v13.4s, %28.4s, v1.s[3] \n"
"fmla v6.4s, %20.4s, v0.s[2] \n"
"fmla v7.4s, %20.4s, v1.s[0] \n"
"fmla v8.4s, %20.4s, v1.s[2] \n"
"fmla v9.4s, %20.4s, v4.s[0] \n"
"fmla v10.4s, %29.4s, v0.s[2] \n"
"fmla v11.4s, %29.4s, v1.s[0] \n"
"fmla v12.4s, %29.4s, v1.s[2] \n"
"fmla v13.4s, %29.4s, v4.s[0] \n"
"subs %w0, %w0, #1 \n"
"st1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%1], #64 \n"
"st1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%2], #64 \n"
"bne 0b \n"
: "=r"(nn),      // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(r0),      // %3
"=r"(r1),      // %4
"=r"(r2)       // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00_0), // %12
"w"(_k01_0), // %13
"w"(_k02_0), // %14
"w"(_k10_0), // %15
"w"(_k11_0), // %16
"w"(_k12_0), // %17
"w"(_k20_0), // %18
"w"(_k21_0), // %19
"w"(_k22_0), // %20
"w"(_k00_1), // %21
"w"(_k01_1), // %22
"w"(_k02_1), // %23
"w"(_k10_1), // %24
"w"(_k11_1), // %25
"w"(_k12_1), // %26
"w"(_k20_1), // %27
"w"(_k21_1), // %28
"w"(_k22_1)  // %29
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13");
}
// leftover pixels via intrinsics (input advances 2 per output pixel)
for (; remain > 0; remain--)
{
float32x4_t _sum0 = vld1q_f32(outptr0);
float32x4_t _sum1 = vld1q_f32(outptr1);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r2 = vld1q_f32(r2);
_sum0 = vfmaq_laneq_f32(_sum0, _k00_0, _r0, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k01_0, _r0, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k02_0, _r0, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k10_0, _r1, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k11_0, _r1, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k12_0, _r1, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k20_0, _r2, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k21_0, _r2, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k22_0, _r2, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _k00_1, _r0, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _k01_1, _r0, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _k02_1, _r0, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _k10_1, _r1, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _k11_1, _r1, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _k12_1, _r1, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _k20_1, _r2, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _k21_1, _r2, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _k22_1, _r2, 2);
vst1q_f32(outptr0, _sum0);
vst1q_f32(outptr1, _sum1);
r0 += 2;
r1 += 2;
r2 += 2;
outptr0 += 4;
outptr1 += 4;
}
// jump the row pointers down two input rows (stride 2 vertically)
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
k0 += 9 * 4;
k1 += 9 * 4;
}
}
#endif // __ARM_NEON && __aarch64__
// remaining (or, on 32-bit ARM, all) output channels one at a time
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out0 = top_blob.channel(p)
;
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
const float* k0 = kernel.channel(p);
for (int q = 0; q < inch; q++)
{
float* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0 + 4);
float32x4_t _k02 = vld1q_f32(k0 + 8);
float32x4_t _k10 = vld1q_f32(k0 + 12);
float32x4_t _k11 = vld1q_f32(k0 + 16);
float32x4_t _k12 = vld1q_f32(k0 + 20);
float32x4_t _k20 = vld1q_f32(k0 + 24);
float32x4_t _k21 = vld1q_f32(k0 + 28);
float32x4_t _k22 = vld1q_f32(k0 + 32);
int i = 0;
for (; i < outh; i++)
{
int nn = outw >> 2;
int remain = outw & 3;
#if __aarch64__
// 4 output pixels per iteration (v6-v9 accumulators)
if (nn > 0)
{
asm volatile(
"0: \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%1] \n" // sum0
// r0
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4s, v1.4s}, [%2], #32 \n"
"ld1r {v4.4s}, [%2] \n"
"fmla v6.4s, %10.4s, v0.s[0] \n"
"fmla v7.4s, %10.4s, v0.s[2] \n"
"fmla v8.4s, %10.4s, v1.s[0] \n"
"fmla v9.4s, %10.4s, v1.s[2] \n"
"fmla v6.4s, %11.4s, v0.s[1] \n"
"fmla v7.4s, %11.4s, v0.s[3] \n"
"fmla v8.4s, %11.4s, v1.s[1] \n"
"fmla v9.4s, %11.4s, v1.s[3] \n"
// r1
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v2.4s, v3.4s}, [%3], #32 \n"
"ld1r {v5.4s}, [%3] \n"
"fmla v6.4s, %12.4s, v0.s[2] \n"
"fmla v7.4s, %12.4s, v1.s[0] \n"
"fmla v8.4s, %12.4s, v1.s[2] \n"
"fmla v9.4s, %12.4s, v4.s[0] \n"
"fmla v6.4s, %13.4s, v2.s[0] \n"
"fmla v7.4s, %13.4s, v2.s[2] \n"
"fmla v8.4s, %13.4s, v3.s[0] \n"
"fmla v9.4s, %13.4s, v3.s[2] \n"
"fmla v6.4s, %14.4s, v2.s[1] \n"
"fmla v7.4s, %14.4s, v2.s[3] \n"
"fmla v8.4s, %14.4s, v3.s[1] \n"
"fmla v9.4s, %14.4s, v3.s[3] \n"
// r2
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v0.4s, v1.4s}, [%4], #32 \n"
"ld1r {v4.4s}, [%4] \n"
"fmla v6.4s, %15.4s, v2.s[2] \n"
"fmla v7.4s, %15.4s, v3.s[0] \n"
"fmla v8.4s, %15.4s, v3.s[2] \n"
"fmla v9.4s, %15.4s, v5.s[0] \n"
"fmla v6.4s, %16.4s, v0.s[0] \n"
"fmla v7.4s, %16.4s, v0.s[2] \n"
"fmla v8.4s, %16.4s, v1.s[0] \n"
"fmla v9.4s, %16.4s, v1.s[2] \n"
"fmla v6.4s, %17.4s, v0.s[1] \n"
"fmla v7.4s, %17.4s, v0.s[3] \n"
"fmla v8.4s, %17.4s, v1.s[1] \n"
"fmla v9.4s, %17.4s, v1.s[3] \n"
"fmla v6.4s, %18.4s, v0.s[2] \n"
"fmla v7.4s, %18.4s, v1.s[0] \n"
"fmla v8.4s, %18.4s, v1.s[2] \n"
"fmla v9.4s, %18.4s, v4.s[0] \n"
"subs %w0, %w0, #1 \n"
"st1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%1], #64 \n"
"bne 0b \n"
: "=r"(nn),      // %0
"=r"(outptr0), // %1
"=r"(r0),      // %2
"=r"(r1),      // %3
"=r"(r2)       // %4
: "0"(nn),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22)  // %18
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9");
}
#else  // __aarch64__
// 4 output pixels per iteration (q0-q3 accumulators)
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%1, #512] \n"
"vldm %1, {d0-d7} \n" // sum0
// r0
"pld [%2, #256] \n"
"vld1.f32 {d8-d11}, [%2]! \n"
"vld1.f32 {d12[]}, [%2] \n"
"vmla.f32 q0, %q10, d8[0] \n"
"vmla.f32 q1, %q10, d9[0] \n"
"vmla.f32 q2, %q10, d10[0] \n"
"vmla.f32 q3, %q10, d11[0] \n"
"vmla.f32 q0, %q11, d8[1] \n"
"vmla.f32 q1, %q11, d9[1] \n"
"vmla.f32 q2, %q11, d10[1] \n"
"vmla.f32 q3, %q11, d11[1] \n"
"vmla.f32 q0, %q12, d9[0] \n"
"vmla.f32 q1, %q12, d10[0] \n"
"vmla.f32 q2, %q12, d11[0] \n"
// r1
"pld [%3, #256] \n"
"vld1.f32 {d8-d11}, [%3]! \n"
"vld1.f32 {d13[]}, [%3] \n"
"vmla.f32 q3, %q12, d12[0] \n"
"vmla.f32 q0, %q13, d8[0] \n"
"vmla.f32 q1, %q13, d9[0] \n"
"vmla.f32 q2, %q13, d10[0] \n"
"vmla.f32 q3, %q13, d11[0] \n"
"vmla.f32 q0, %q14, d8[1] \n"
"vmla.f32 q1, %q14, d9[1] \n"
"vmla.f32 q2, %q14, d10[1] \n"
"vmla.f32 q3, %q14, d11[1] \n"
"vmla.f32 q0, %q15, d9[0] \n"
"vmla.f32 q1, %q15, d10[0] \n"
"vmla.f32 q2, %q15, d11[0] \n"
// r2
"pld [%4, #256] \n"
"vld1.f32 {d8-d11}, [%4]! \n"
"vld1.f32 {d12[]}, [%4] \n"
"vmla.f32 q3, %q15, d13[0] \n"
"vmla.f32 q0, %q16, d8[0] \n"
"vmla.f32 q1, %q16, d9[0] \n"
"vmla.f32 q2, %q16, d10[0] \n"
"vmla.f32 q3, %q16, d11[0] \n"
"vmla.f32 q0, %q17, d8[1] \n"
"vmla.f32 q1, %q17, d9[1] \n"
"vmla.f32 q2, %q17, d10[1] \n"
"vmla.f32 q3, %q17, d11[1] \n"
"vmla.f32 q0, %q18, d9[0] \n"
"vmla.f32 q1, %q18, d10[0] \n"
"vmla.f32 q2, %q18, d11[0] \n"
"vmla.f32 q3, %q18, d12[0] \n"
"subs %0, %0, #1 \n"
"vstm %1!, {d0-d7} \n"
"bne 0b \n"
: "=r"(nn),      // %0
"=r"(outptr0), // %1
"=r"(r0),      // %2
"=r"(r1),      // %3
"=r"(r2)       // %4
: "0"(nn),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22)  // %18
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6");
}
#endif // __aarch64__
// leftover pixels via intrinsics
for (; remain > 0; remain--)
{
float32x4_t _sum0 = vld1q_f32(outptr0);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r2 = vld1q_f32(r2);
#if __aarch64__
_sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2);
#else
_sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k11, vget_low_f32(_r1), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0);
#endif
vst1q_f32(outptr0, _sum0);
r0 += 2;
r1 += 2;
r2 += 2;
outptr0 += 4;
}
// jump the row pointers down two input rows (stride 2 vertically)
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
k0 += 9 * 4;
}
}
}
|
quantum.h | #pragma once
#include <global.h>
#include <randomutil.h>
#include <mathutil.h>
#include <testutil.h>
#include <cmath>
#include <iomanip>
#include "state_manipulator.h"
ns_easyquantum
#define TO_STRING_CASE(gatetype) case GateType::gatetype:
#define TO_STRING_DAG if (dag) {ss<<"dag ";}
/* gate type definition */
enum class GateType {
null,
CNOT,
RX,
RY,
RZ,
H,
DIAGONAL,
I,
};
constexpr double quantum_default_threshold = 1e-8;
// forward decl
template<typename T> class Circuit;
template<typename T> class Gate;
template<typename qtraits = default_qtraits>
class Gate {
public:
using fp_t = typename qtraits::value_t;
using qid = typename qtraits::qidx_t;
std::vector<qid> qubits;
fp_t argument;
GateType type = GateType::null;
std::vector<qid> controllers;
bool dag = false;
Gate() {}
Gate(GateType type_, std::vector<qid> qs, fp_t arg = 0)
: type(type_),
qubits(qs.begin(), qs.end()),
argument(arg) {}
Gate(const Gate<qtraits>& g) {
qubits.assign(g.qubits.begin(), g.qubits.end());
argument = g.argument;
type = g.type;
controllers.assign(g.controllers.begin(), g.controllers.end());
dag = g.dag;
}
Gate<qtraits> valuecopy() const {
Gate<qtraits> g;
g.qubits = qubits;
g.argument = argument;
g.type = type;
g.controllers = controllers;
g.dag = dag;
return g;
}
void dagger() {
dag = !dag;
}
Gate<qtraits> get_dagger() const {
Gate<qtraits> g(*this);
g.dag = !g.dag;
return g;
}
void control(std::vector<qid> new_controllers) {
controllers.insert(g.controllers.end(),
new_controllers.begin(), new_controllers.end());
}
Gate<qtraits> get_control(std::vector<qid> new_controllers) const {
Gate<qtraits> g(*this);
g.controllers.insert(g.controllers.end(), new_controllers.begin(), new_controllers.end());
return g;
}
static std::string qasm_style_qubit(qid i) {
std::stringstream ss;
ss << "q[" << i << "]";
return ss.str();
}
static std::string controller_to_string(std::vector<qid> controllers) {
if (controllers.size() == 0) return std::string();
std::stringstream ss;
ss << "ctrl:{" << vec2str(controllers) << "}";
return ss.str();
}
static std::string params_to_string(std::vector<fp_t> args) {
if (args.size() == 0) return std::string();
std::stringstream ss;
ss << std::showpoint;
ss << "(" << vec2str(args, ", ", SHOWPOINT) << ")";
return ss.str();
}
std::string to_string() const {
std::stringstream ss;
switch (type) {
TO_STRING_CASE(CNOT) {
ss << "CNOT ";
ss << qubits[0] << ", " << qubits[1] << " ";
ss << controller_to_string(controllers);
return ss.str();
}
TO_STRING_CASE(RX) {
ss << "RX ";
TO_STRING_DAG;
ss << qubits[0] << " ";
ss << params_to_string({ argument });
ss << " ";
ss << controller_to_string(controllers);
return ss.str();
}
TO_STRING_CASE(RY) {
ss << "RY ";
TO_STRING_DAG;
ss << qubits[0] << " ";
ss << params_to_string({ argument });
ss << " ";
ss << controller_to_string(controllers);
return ss.str();
}
TO_STRING_CASE(RZ) {
ss << "RZ ";
TO_STRING_DAG;
ss << qubits[0] << " ";
ss << params_to_string({ argument });
ss << " ";
ss << controller_to_string(controllers);
return ss.str();
}
TO_STRING_CASE(H) {
ss << "H ";
ss << qubits[0] << " ";
ss << controller_to_string(controllers);
return ss.str();
}
TO_STRING_CASE(DIAGONAL) {
ss << "Diagonal ";
ss << 0 << ";";
return ss.str();
}
TO_STRING_CASE(I) {
ss << "I ";
ss << qubits[0] << " ";
ss << params_to_string({ argument });
return ss.str();
}
default: {
assert(false, "Bad Type.");
}
}
}
std::string to_qasm() const {
assert(controllers.size() == 0);
assert(dag == false);
std::stringstream ss;
switch (type) {
TO_STRING_CASE(CNOT) {
ss << "cx ";
ss << qasm_style_qubit(qubits[0]) << ", " << qasm_style_qubit(qubits[1]);
return ss.str();
}
TO_STRING_CASE(RX) {
ss << "rx ";
ss << params_to_string({ argument }) << " ";
ss << qasm_style_qubit(qubits[0]);
return ss.str();
}
TO_STRING_CASE(RY) {
ss << "ry ";
ss << params_to_string({ argument }) << " ";
ss << qasm_style_qubit(qubits[0]);
return ss.str();
}
TO_STRING_CASE(RZ) {
ss << "rz ";
ss << params_to_string({ argument }) << " ";
ss << qasm_style_qubit(qubits[0]);
return ss.str();
}
TO_STRING_CASE(H) {
ss << "h ";
ss << qasm_style_qubit(qubits[0]);
return ss.str();
}
TO_STRING_CASE(DIAGONAL) {
assert(false, "Bad Type.");
}
TO_STRING_CASE(I) {
ss << "Id ";
ss << qubits[0] << " ";
return ss.str();
}
default: assert(false, "Bad Type.");
}
return "";
}
~Gate() { }
};
template<typename qtraits = default_qtraits>
Gate<qtraits> CNOT(typename qtraits::qidx_t controller, typename qtraits::qidx_t target) {
return Gate(GateType::CNOT, { controller, target });
}
template<typename qtraits = default_qtraits>
Gate<qtraits> RX(typename qtraits::qidx_t q, typename qtraits::value_t arg = 0) {
return Gate<qtraits>(GateType::RX, { q }, arg);
}
template<typename qtraits = default_qtraits>
Gate<qtraits> RY(typename qtraits::qidx_t q, typename qtraits::value_t arg) {
return Gate(GateType::RY, { q }, arg);
}
template<typename qtraits = default_qtraits>
Gate<qtraits> RZ(typename qtraits::qidx_t q, typename qtraits::value_t arg) {
return Gate(GateType::RZ, { q }, arg);
}
template<typename qtraits = default_qtraits>
Gate<qtraits> H(typename qtraits::qidx_t q) {
return Gate(GateType::H, { q });
}
template<typename qtraits = default_qtraits>
Gate<qtraits> I(typename qtraits::qidx_t q, size_t time) {
using fp_t = typename qtraits::value_t;
return Gate(GateType::I, { q }, (fp_t)time);
}
class flatten {};
template<typename qtraits = default_qtraits>
class Circuit {
public:
using fp_t = typename qtraits::value_t;
using qid = typename qtraits::qidx_t;
qid max_qubit = 0;
std::list<Gate<qtraits>> gates;
Circuit() {}
void _circuit_append(const Circuit<qtraits>& c) {
if (c.max_qubit > max_qubit)
max_qubit = c.max_qubit;
for (auto gate : c.gates) {
gates.push_back(gate);
}
}
Circuit(const Circuit<qtraits>& c) {
_circuit_append(c);
}
Circuit valuecopy() const {
Circuit c;
c.max_qubit = max_qubit;
for (const auto& g : gates) {
c.gates.push_back(g.valuecopy());
}
return c;
}
Circuit& operator-(Gate<qtraits> g) {
if (g.type != GateType::DIAGONAL) {
for (auto qubit : g.qubits) {
if (max_qubit - 1 < qubit)
max_qubit = qubit + 1;
}
}
gates.push_back(g);
return *this;
}
Circuit& operator-(flatten flatten) {
for (auto &gate : gates) {
if (gate.dag) {
gate.dag = false;
switch (gate.type) {
case GateType::RX:
case GateType::RY:
case GateType::RZ:
gate.argument *= -1;
break;
default:
break;
}
}
}
return *this;
}
Circuit& operator-(const Circuit<qtraits> &c) {
_circuit_append(c);
return *this;
}
Circuit& dagger() {
gates.reverse();
for (auto& gate : gates) {
gate.dagger();
}
return newc;
}
Circuit control(std::vector<qid> controllers) {
Circuit newc(*this);
for (auto& gate : newc.gates) {
gate.control(controllers);
}
return newc;
}
std::string to_string() const {
std::stringstream ss_stat;
std::stringstream ss_header;
std::stringstream ss_groupdefs;
/* header*/
ss_header << "qubit : " << max_qubit << ";" << std::endl;
/* stat */
for (auto gate : gates) {
ss_stat << gate.to_string() << ";" << std::endl;
}
return ss_header.str() + "\n" + ss_stat.str() + "\n";
}
std::string to_qasm() const {
std::stringstream ss;
ss << "OPENQASM 2.0;" << std::endl
<< "include \"qelib1.inc\";" << std::endl
<< "qreg q[" << max_qubit << "];" << std::endl;
for (const auto &gate : gates) {
ss << gate.to_qasm() << ";" << std::endl;
}
return ss.str();
}
~Circuit() {}
};
void get_damping_kraus_op(
double* k0_real, double* k0_imag,
double* k1_real, double* k1_imag,
const int T1, const int T_gate);
void get_dephasing_kraus_op(
double* k0_real, double* k0_imag,
double* k1_real, double* k1_imag,
const int T1, const int T2, const int T_gate);
template<typename qtraits = default_qtraits>
class RealCircuit {
public:
using fp_t = typename qtraits::value_t;
using qid = typename qtraits::qidx_t;
struct Kraus {
fp_t *kraus0_real;
fp_t *kraus0_imag;
fp_t *kraus1_real;
fp_t *kraus1_imag;
};
std::vector<Kraus> one_qubit_damping_kraus;
std::vector<Kraus> one_qubit_dephasing_kraus;
std::vector<Kraus> two_qubit_damping_kraus;
std::vector<Kraus> two_qubit_dephasing_kraus;
Circuit<qtraits> c;
Circuit<qtraits> real_circuit;
int one_qubit_gate_time;
int two_qubit_gate_time;
std::vector<int> T1;
std::vector<int> T2;
std::vector<fp_t> one_qubit_gate_error;
std::vector<std::vector<fp_t>> two_qubit_gate_error;
std::vector<std::vector<GateType>> clock_cycle;
bool pre_gen_used = false;
static struct Config {
int one_qubit_gate_time = 1;
int two_qubit_gate_time = 2;
int global_T1 = 1000;
int global_T2 = 1000;
fp_t one_qubit_gate_error = 0.01;
fp_t two_qubit_gate_error = 0.02;
} default_config;
RealCircuit() {}
void assign_circuit(const Circuit<qtraits>& c_) {
one_qubit_gate_error.assign(c_.max_qubit, default_config.one_qubit_gate_error);
T1.assign(c_.max_qubit, default_config.global_T1);
T2.assign(c_.max_qubit, default_config.global_T2);
one_qubit_gate_time = default_config.one_qubit_gate_time;
two_qubit_gate_time = default_config.two_qubit_gate_time;
two_qubit_gate_error.resize(c_.max_qubit);
for (auto& e : two_qubit_gate_error) {
e.resize(c_.max_qubit, default_config.two_qubit_gate_error);
}
clock_cycle.resize(c_.max_qubit);
}
explicit RealCircuit(const Circuit<qtraits>& c_) :
c(c_.valuecopy()) {
assign_circuit(c_);
}
RealCircuit(const RealCircuit<qtraits>&) = delete;
//RealCircuit(RealCircuit<qtraits>&& rc) {
// c = std::move(rc.c);
// real_circuit = std::move(rc.real_circuit);
// one_qubit_gate_time = rc.one_qubit_gate_time;
// two_qubit_gate_time = rc.two_qubit_gate_time;
// T1 = std::move(rc.T1);
// T2 = std::move(rc.T2);
// one_qubit_gate_error = std::move(rc.one_qubit_gate_error);
// two_qubit_gate_error = std::move(rc.two_qubit_gate_error);
// clock_cycle = std::move(rc.clock_cycle);
// pre_gen_used = rc.pre_gen_used;
//}
RealCircuit<qtraits>&& valuecopy() {
RealCircuit<qtraits> rc;
rc.c = c.valuecopy();
rc.real_circuit = real_circuit.valuecopy();
rc.one_qubit_gate_time = one_qubit_gate_time;
rc.two_qubit_gate_time = two_qubit_gate_time;
rc.T1 = T1;
rc.T2 = T2;
rc.one_qubit_gate_error = one_qubit_gate_error;
rc.two_qubit_gate_error = two_qubit_gate_error;
rc.clock_cycle = clock_cycle;
rc.pre_gen_used = pre_gen_used;
return rc;
}
void analyze_clock() {
for (const auto &gate : c.gates) {
assert(gate.controllers.size() == 0, "No controller is allowed.");
assert(gate.dag == false, "Flatten first.");
switch (gate.type) {
case GateType::RX:
case GateType::RY:
case GateType::RZ:
case GateType::H:
add_single_qubit(gate, gate.qubits[0]);
break;
case GateType::CNOT:
add_two_qubit(gate, gate.qubits[0], gate.qubits[1]);
break;
case GateType::I:
add_I(gate, gate.qubits[0]);
break;
default:
assert(false, "Not supported gate type.");
}
}
}
void generate_kraus_op() {
qid qsize = c.max_qubit;
/* one_qubit_damping_kraus */
for (qid i = 0; i < qsize; ++i) {
fp_t *kraus0_real = (fp_t*)malloc(4 * sizeof(fp_t));
fp_t *kraus0_imag = (fp_t*)malloc(4 * sizeof(fp_t));
fp_t *kraus1_real = (fp_t*)malloc(4 * sizeof(fp_t));
fp_t *kraus1_imag = (fp_t*)malloc(4 * sizeof(fp_t));
get_damping_kraus_op(kraus0_real, kraus0_imag,
kraus1_real, kraus1_imag,
T1[i], one_qubit_gate_time);
Kraus one_damping;
one_damping.kraus0_real = kraus0_real;
one_damping.kraus0_imag = kraus0_imag;
one_damping.kraus1_real = kraus1_real;
one_damping.kraus1_imag = kraus1_imag;
one_qubit_damping_kraus.push_back(one_damping);
}
/* one_qubit_dephasing_kraus */
for (qid i = 0; i < qsize; ++i) {
fp_t *kraus0_real = (fp_t*)malloc(4 * sizeof(fp_t));
fp_t *kraus0_imag = (fp_t*)malloc(4 * sizeof(fp_t));
fp_t *kraus1_real = (fp_t*)malloc(4 * sizeof(fp_t));
fp_t *kraus1_imag = (fp_t*)malloc(4 * sizeof(fp_t));
get_dephasing_kraus_op(kraus0_real, kraus0_imag,
kraus1_real, kraus1_imag, T1[i],
T2[i], one_qubit_gate_time);
Kraus one_dephasing;
one_dephasing.kraus0_real = kraus0_real;
one_dephasing.kraus0_imag = kraus0_imag;
one_dephasing.kraus1_real = kraus1_real;
one_dephasing.kraus1_imag = kraus1_imag;
one_qubit_dephasing_kraus.push_back(one_dephasing);
}
/* two_qubit_damping_kraus */
for (qid i = 0; i < qsize; ++i) {
fp_t *kraus0_real = (fp_t*)malloc(4 * sizeof(fp_t));
fp_t *kraus0_imag = (fp_t*)malloc(4 * sizeof(fp_t));
fp_t *kraus1_real = (fp_t*)malloc(4 * sizeof(fp_t));
fp_t *kraus1_imag = (fp_t*)malloc(4 * sizeof(fp_t));
get_damping_kraus_op(kraus0_real, kraus0_imag,
kraus1_real, kraus1_imag,
T1[i], two_qubit_gate_time);
Kraus two_damping;
two_damping.kraus0_real = kraus0_real;
two_damping.kraus0_imag = kraus0_imag;
two_damping.kraus1_real = kraus1_real;
two_damping.kraus1_imag = kraus1_imag;
two_qubit_damping_kraus.push_back(two_damping);
}
/* two_qubit_dephasing_kraus */
for (qid i = 0; i < qsize; ++i) {
fp_t *kraus0_real = (fp_t*)malloc(4 * sizeof(fp_t));
fp_t *kraus0_imag = (fp_t*)malloc(4 * sizeof(fp_t));
fp_t *kraus1_real = (fp_t*)malloc(4 * sizeof(fp_t));
fp_t *kraus1_imag = (fp_t*)malloc(4 * sizeof(fp_t));
get_dephasing_kraus_op(kraus0_real, kraus0_imag,
kraus1_real, kraus1_imag, T1[i],
T2[i], two_qubit_gate_time);
Kraus two_dephasing;
two_dephasing.kraus0_real = kraus0_real;
two_dephasing.kraus0_imag = kraus0_imag;
two_dephasing.kraus1_real = kraus1_real;
two_dephasing.kraus1_imag = kraus1_imag;
two_qubit_dephasing_kraus.push_back(two_dephasing);
}
}
void add_single_qubit(Gate<qtraits> g, qid q) {
clock_cycle[q].push_back(g.type);
for (int i = 1; i < one_qubit_gate_time; ++i)
clock_cycle[q].push_back(GateType(-1));
real_circuit - g;
}
void add_two_qubit(Gate<qtraits> g, qid q1, qid q2) {
size_t front1 = clock_cycle[q1].size();
size_t front2 = clock_cycle[q2].size();
if (front1 > front2) {
clock_cycle[q2].push_back(GateType::I);
for (int i = 1; i < front1 - front2; ++i)
clock_cycle[q2].push_back(GateType(-1));
real_circuit - I(q2, front1 - front2);
}
else if (front2 > front1) {
clock_cycle[q1].push_back(GateType::I);
for (int i = 1; i < front2 - front1; ++i)
clock_cycle[q1].push_back(GateType(-1));
real_circuit - I(q1, front2 - front1);
}
clock_cycle[q1].push_back(g.type);
clock_cycle[q2].push_back(g.type);
for (int i = 1; i < two_qubit_gate_time; ++i) {
clock_cycle[q1].push_back(GateType(-1));
clock_cycle[q2].push_back(GateType(-1));
}
real_circuit - g;
}
void add_I(Gate<qtraits> g, qid q) {
clock_cycle[q].push_back(g.type);
for (int i = 1; i < int(g.argument); ++i)
clock_cycle[q].push_back(GateType(-1));
real_circuit - g;
}
void ready() {
analyze_clock();
if (use_pre_gen && !pre_gen_used) {
pre_gen_used = true;
one_qubit_damping_kraus.assign(pre_gen_1qdam.begin(), pre_gen_1qdam.end());
one_qubit_dephasing_kraus.assign(pre_gen_1qdep.begin(), pre_gen_1qdep.end());
two_qubit_damping_kraus.assign(pre_gen_2qdam.begin(), pre_gen_2qdam.end());
two_qubit_dephasing_kraus.assign(pre_gen_2qdep.begin(), pre_gen_2qdep.end());
}
else {
generate_kraus_op();
}
}
std::string to_string() const {
std::stringstream ss;
for (auto cc : clock_cycle) {
for (auto p : cc) {
if ((int)p == -1)
ss << std::setw(3) << " ";
else
ss << std::setw(3) << (int)p;
}
ss << std::endl;
}
return ss.str();
}
~RealCircuit() {
if (pre_gen_used)
return;
for (auto p : one_qubit_damping_kraus) {
free(p.kraus0_real);
free(p.kraus0_imag);
free(p.kraus1_real);
free(p.kraus1_imag);
}
for (auto p : one_qubit_dephasing_kraus) {
free(p.kraus0_real);
free(p.kraus0_imag);
free(p.kraus1_real);
free(p.kraus1_imag);
}
for (auto p : two_qubit_damping_kraus) {
free(p.kraus0_real);
free(p.kraus0_imag);
free(p.kraus1_real);
free(p.kraus1_imag);
}
for (auto p : two_qubit_dephasing_kraus) {
free(p.kraus0_real);
free(p.kraus0_imag);
free(p.kraus1_real);
free(p.kraus1_imag);
}
}
static std::vector<Kraus> pre_gen_1qdam;
static std::vector<Kraus> pre_gen_1qdep;
static std::vector<Kraus> pre_gen_2qdam;
static std::vector<Kraus> pre_gen_2qdep;
static void pre_gen_kraus(size_t qsize) {
use_pre_gen = true;
auto& T1 = decltype(*this)::default_config.global_T1;
auto& T2 = decltype(*this)::default_config.global_T2;
auto& one_qubit_gate_time = decltype(*this)::default_config.one_qubit_gate_time;
auto& two_qubit_gate_time = decltype(*this)::default_config.two_qubit_gate_time;
/* one_qubit_damping_kraus */
for (qid i = 0; i < qsize; ++i) {
fp_t* kraus0_real = (fp_t*)malloc(4 * sizeof(fp_t));
fp_t* kraus0_imag = (fp_t*)malloc(4 * sizeof(fp_t));
fp_t* kraus1_real = (fp_t*)malloc(4 * sizeof(fp_t));
fp_t* kraus1_imag = (fp_t*)malloc(4 * sizeof(fp_t));
get_damping_kraus_op(kraus0_real, kraus0_imag,
kraus1_real, kraus1_imag,
T1, one_qubit_gate_time);
Kraus one_damping;
one_damping.kraus0_real = kraus0_real;
one_damping.kraus0_imag = kraus0_imag;
one_damping.kraus1_real = kraus1_real;
one_damping.kraus1_imag = kraus1_imag;
pre_gen_1qdam.push_back(one_damping);
}
/* one_qubit_dephasing_kraus */
for (qid i = 0; i < qsize; ++i) {
fp_t* kraus0_real = (fp_t*)malloc(4 * sizeof(fp_t));
fp_t* kraus0_imag = (fp_t*)malloc(4 * sizeof(fp_t));
fp_t* kraus1_real = (fp_t*)malloc(4 * sizeof(fp_t));
fp_t* kraus1_imag = (fp_t*)malloc(4 * sizeof(fp_t));
get_dephasing_kraus_op(kraus0_real, kraus0_imag,
kraus1_real, kraus1_imag, T1,
T2, one_qubit_gate_time);
Kraus one_dephasing;
one_dephasing.kraus0_real = kraus0_real;
one_dephasing.kraus0_imag = kraus0_imag;
one_dephasing.kraus1_real = kraus1_real;
one_dephasing.kraus1_imag = kraus1_imag;
pre_gen_1qdep.push_back(one_dephasing);
}
/* two_qubit_damping_kraus */
for (qid i = 0; i < qsize; ++i) {
fp_t* kraus0_real = (fp_t*)malloc(4 * sizeof(fp_t));
fp_t* kraus0_imag = (fp_t*)malloc(4 * sizeof(fp_t));
fp_t* kraus1_real = (fp_t*)malloc(4 * sizeof(fp_t));
fp_t* kraus1_imag = (fp_t*)malloc(4 * sizeof(fp_t));
get_damping_kraus_op(kraus0_real, kraus0_imag,
kraus1_real, kraus1_imag,
T1, two_qubit_gate_time);
Kraus two_damping;
two_damping.kraus0_real = kraus0_real;
two_damping.kraus0_imag = kraus0_imag;
two_damping.kraus1_real = kraus1_real;
two_damping.kraus1_imag = kraus1_imag;
pre_gen_2qdam.push_back(two_damping);
}
/* two_qubit_dephasing_kraus */
for (qid i = 0; i < qsize; ++i) {
fp_t* kraus0_real = (fp_t*)malloc(4 * sizeof(fp_t));
fp_t* kraus0_imag = (fp_t*)malloc(4 * sizeof(fp_t));
fp_t* kraus1_real = (fp_t*)malloc(4 * sizeof(fp_t));
fp_t* kraus1_imag = (fp_t*)malloc(4 * sizeof(fp_t));
get_dephasing_kraus_op(kraus0_real, kraus0_imag,
kraus1_real, kraus1_imag, T1,
T2, two_qubit_gate_time);
Kraus two_dephasing;
two_dephasing.kraus0_real = kraus0_real;
two_dephasing.kraus0_imag = kraus0_imag;
two_dephasing.kraus1_real = kraus1_real;
two_dephasing.kraus1_imag = kraus1_imag;
pre_gen_2qdep.push_back(two_dephasing);
}
}
static void set_use_pre_gen(bool use) {
use_pre_gen = use;
}
static bool use_pre_gen;
};
template<typename qtraits>
bool RealCircuit<qtraits>::use_pre_gen = false;
template<typename qtraits>
typename RealCircuit<qtraits>::Config
RealCircuit<qtraits>::default_config;
template<typename qtraits>
std::vector<typename RealCircuit<qtraits>::Kraus>
RealCircuit<qtraits>::pre_gen_1qdam;
template<typename qtraits>
std::vector<typename RealCircuit<qtraits>::Kraus>
RealCircuit<qtraits>::pre_gen_1qdep;
template<typename qtraits>
std::vector<typename RealCircuit<qtraits>::Kraus>
RealCircuit<qtraits>::pre_gen_2qdam;
template<typename qtraits>
std::vector<typename RealCircuit<qtraits>::Kraus>
RealCircuit<qtraits>::pre_gen_2qdep;
template<typename qtraits_t = default_qtraits>
struct result_analyzer {
using qtraits = qtraits_t;
using qid = typename qtraits::qidx_t;
using fp_t = typename qtraits::value_t;
using uint_t = typename qtraits::idx_t;
static size_t count_zero(uint_t* result, size_t shots) {
size_t zeros = 0;
for (size_t i = 0; i < shots; ++i) {
if (result[i] == 0)
zeros++;
}
return zeros;
}
static size_t count_n(uint_t* result, size_t shots, uint_t n) {
size_t count = 0;
for (size_t i = 0; i < shots; ++i) {
if (result[i] == n)
count++;
}
return count;
}
static std::vector<qid> generate_qubit_idxes(qid max_qubit) {
std::vector<qid> q(max_qubit, 0);
for (qid i = 0; i < max_qubit; ++i) {
q[i] = i;
}
return q;
}
static std::pair<uint_t, size_t> find_max(uint_t* result, size_t shots) {
std::map<uint_t, size_t> max_counter;
std::pair<uint_t, size_t> max = { 0,0 };
for (size_t i = 0; i < shots; ++i) {
if (max_counter.find(result[i]) == max_counter.end()) {
max_counter[result[i]] = 0;
}
max_counter[result[i]]++;
size_t &p = max_counter[result[i]];
if (p > max.second) {
max = { result[i], p };
}
}
return max;
}
static fp_t get_expectation_from_amplitude(fp_t *real, fp_t *imag, size_t size, fp_t *diag_h) {
fp_t expectation = 0;
for (size_t i = 0; i < size; ++i) {
fp_t prob = real[i] * real[i] + imag[i] * imag[i];
expectation += prob * diag_h[i];
}
return expectation;
}
static double get_expectation(const uint_t *res, const uint_t size, const std::vector<double> &diag) {
double exp = 0;
for (uint_t i = 0; i < size; ++i) {
exp += diag[res[i]];
}
return exp / size;
}
static std::vector<fp_t> get_meas_probs(const uint_t *res, const uint_t shots, const uint_t size) {
std::vector<fp_t> probs(size, 0);
for (uint_t i = 0; i < shots; ++i) {
probs[res[i]] += (1.0 / shots);
}
return probs;
}
static std::vector<uint_t> get_meas_count(const uint_t *res, const uint_t shots, const uint_t size) {
std::vector<uint_t> count(size, 0);
for (uint_t i = 0; i < shots; ++i) {
count[res[i]]++;
}
return count;
}
static bool check_error(fp_t *prob1, fp_t *prob2, uint_t size, fp_t error_bound) {
for (uint_t i = 0; i < size; ++i) {
if (abs(prob1[i] - prob2[i]) > error_bound) {
return false;
}
}
return true;
}
static fp_t get_norm_2(const fp_t *prob1, const fp_t *prob2, const uint_t size) {
fp_t norm2 = 0;
for (uint_t i = 0; i < size; ++i) {
norm2 += (prob1[i] - prob2[i])*(prob1[i] - prob2[i]);
}
norm2 = sqrt(norm2);
return norm2;
}
static fp_t get_norm_inf(const fp_t *prob1, const fp_t *prob2, const uint_t size) {
fp_t norminf = 0;
for (uint_t i = 0; i < size; ++i) {
fp_t ninf = abs(prob1[i] - prob2[i]);
if (ninf > norminf) norminf = ninf;
}
return norminf;
}
};
template<typename qtraits = default_qtraits>
struct simulator_v1 {
using fp_t = typename qtraits::value_t;
using uint_t = typename qtraits::idx_t;
using qid = typename qtraits::qidx_t;
static void run_circuit(const Circuit<qtraits> &c, fp_t *real, fp_t *imag, uint_t size) {
const qid &qn = c.max_qubit;
// auto &groupdefs = c.groupdefs;
size_t groupdef_iter = 0;
for (const Gate<qtraits> &gate : c.gates) {
switch (gate.type) {
case GateType::RX:
state_manipulator<qtraits>::rx(real, imag, size, gate.argument, gate.qubits[0]);
break;
case GateType::RY:
state_manipulator<qtraits>::ry(real, imag, size, gate.argument, gate.qubits[0]);
break;
case GateType::RZ:
state_manipulator<qtraits>::rz(real, imag, size, gate.argument, gate.qubits[0]);
break;
case GateType::H:
state_manipulator<qtraits>::h(real, imag, size, gate.qubits[0]);
break;
case GateType::CNOT:
state_manipulator<qtraits>::cnot(real, imag, size, gate.qubits[0], gate.qubits[1]);
break;
case GateType::I:
break;
default:
assert(false, "Bad Type.");
}
}
}
static uint_t *simulate_N(size_t n, const Circuit<qtraits> &c, RandomEngine *rng) {
// first allocate n times memory
uint_t size = pow2(c.max_qubit);
fp_t *real_n = (fp_t*)malloc(n * size * sizeof(fp_t));
fp_t *imag_n = (fp_t*)malloc(n * size * sizeof(fp_t));
memset(real_n, 0, n * size * sizeof(fp_t));
memset(imag_n, 0, n * size * sizeof(fp_t));
uint_t *result = (uint_t*)malloc(n * sizeof(uint_t));
memset(result, 0, sizeof(uint_t)*n);
for (int i = 0; i < n; ++i) {
fp_t* real = &real_n[i * size];
fp_t* imag = &imag_n[i * size];
real[0] = 1;
run_circuit(c, real, imag, size);
fp_t randnum = (fp_t)(*rng)();
fp_t total_prob = 0;
for (uint_t j = 0; j < size; ++j) {
fp_t pi = real[j] * real[j] + imag[j] * imag[j];
total_prob += pi;
if (randnum < total_prob) {
result[i] = j; break;
}
}
}
free(real_n);
free(imag_n);
return result;
}
/*
This version will assume the real, imag is refreshed to zero state.
and result has enough space.
*/
static void simulate_N(size_t n, const Circuit<qtraits> &c, RandomEngine** rng,
fp_t *real_n, fp_t *imag_n, uint_t *result) {
uint_t size = pow2(c.max_qubit);
memset(real_n, 0, n * size * sizeof(fp_t));
memset(imag_n, 0, n * size * sizeof(fp_t));
for (int i = 0; i < n; ++i) {
fp_t* real = &real_n[i * size];
fp_t* imag = &imag_n[i * size];
real[0] = 1;
run_circuit(c, real, imag, size);
fp_t randnum = (fp_t)(*rng[i])();
fp_t total_prob = 0;
for (uint_t j = 0; j < size; ++j) {
fp_t pi = real[j] * real[j] + imag[j] * imag[j];
total_prob += pi;
if (randnum < total_prob) {
result[i] = j; break;
}
}
}
}
static uint_t* simulate_N_threads(size_t n, const Circuit<qtraits>& c, RandomEngine** rng) {
// first allocate n times memory
uint_t size = pow2(c.max_qubit);
fp_t* real_n = (fp_t*)malloc(n * size * sizeof(fp_t));
fp_t* imag_n = (fp_t*)malloc(n * size * sizeof(fp_t));
memset(real_n, 0, n * size * sizeof(fp_t));
memset(imag_n, 0, n * size * sizeof(fp_t));
uint_t* result = (uint_t*)malloc(n * sizeof(uint_t));
memset(result, 0, sizeof(uint_t) * n);
#pragma omp parallel for
for (int i = 0; i < n; ++i) {
fp_t* real = &real_n[i * size];
fp_t* imag = &imag_n[i * size];
real[0] = 1;
run_circuit(c, real, imag, size);
fp_t randnum = (fp_t)(*rng[i])();
fp_t total_prob = 0;
for (uint_t j = 0; j < size; ++j) {
fp_t pi = real[j] * real[j] + imag[j] * imag[j];
total_prob += pi;
if (randnum < total_prob) {
result[i] = j; break;
}
}
}
free(real_n);
free(imag_n);
return result;
}
static void simulate_N_threads(size_t n, const Circuit<qtraits> &c, RandomEngine** rng,
fp_t* real_n, fp_t* imag_n, uint_t* result) {
uint_t size = pow2(c.max_qubit);
memset(real_n, 0, n * size * sizeof(fp_t));
memset(imag_n, 0, n * size * sizeof(fp_t));
#pragma omp parallel for
for (int i = 0; i < n; ++i) {
fp_t* real = &real_n[i * size];
fp_t* imag = &imag_n[i * size];
real[0] = 1;
run_circuit(c, real, imag, size);
fp_t randnum = (fp_t)(*rng[i])();
fp_t total_prob = 0;
for (uint_t j = 0; j < size; ++j) {
fp_t pi = real[j] * real[j] + imag[j] * imag[j];
total_prob += pi;
if (randnum < total_prob) {
result[i] = j; break;
}
}
}
}
static void run_real_circuit(const RealCircuit<qtraits> &rc,
fp_t *real, fp_t *imag, uint_t size, RandomEngine* rng) {
auto &p1 = rc.one_qubit_gate_error;
auto &p2 = rc.two_qubit_gate_error;
auto &T1 = rc.T1;
auto &T2 = rc.T2;
auto &one_qubit = rc.one_qubit_gate_time;
auto &two_qubit = rc.two_qubit_gate_time;
const Circuit<qtraits>& c = rc.real_circuit;
const qid &qn = c.max_qubit;
/*debug_info_s(Circuit:);
debug_print_circuit;*/
for (auto &gate : c.gates) {
// incoherent error
// for first qubit
fp_t *damp0_real = nullptr, *damp0_imag = nullptr,
*damp1_real = nullptr, *damp1_imag = nullptr;
fp_t *dephase0_real = nullptr, *dephase0_imag = nullptr,
*dephase1_real = nullptr, *dephase1_imag = nullptr;
// for second qubit
fp_t *damp0_real2 = nullptr, *damp0_imag2 = nullptr,
*damp1_real2 = nullptr, *damp1_imag2 = nullptr;
fp_t *dephase0_real2 = nullptr, *dephase0_imag2 = nullptr,
*dephase1_real2 = nullptr, *dephase1_imag2 = nullptr;
if (gate.type == GateType::I) {
damp0_real = (fp_t*)malloc(4 * sizeof(fp_t));
damp0_imag = (fp_t*)malloc(4 * sizeof(fp_t));
damp1_real = (fp_t*)malloc(4 * sizeof(fp_t));
damp1_imag = (fp_t*)malloc(4 * sizeof(fp_t));
dephase0_real = (fp_t*)malloc(4 * sizeof(fp_t));
dephase0_imag = (fp_t*)malloc(4 * sizeof(fp_t));
dephase1_real = (fp_t*)malloc(4 * sizeof(fp_t));
dephase1_imag = (fp_t*)malloc(4 * sizeof(fp_t));
get_damping_kraus_op(
damp0_real, damp0_imag, damp1_real, damp1_imag,
T1[gate.qubits[0]], (int)gate.argument);
get_dephasing_kraus_op(
dephase0_real, dephase0_imag, dephase1_real, dephase1_imag,
T1[gate.qubits[0]], T2[gate.qubits[0]], (int)gate.argument);
}
else if (gate.qubits.size() == 1) {
auto &damp = rc.one_qubit_damping_kraus[gate.qubits[0]];
damp0_real = damp.kraus0_real;
damp0_imag = damp.kraus0_imag;
damp1_real = damp.kraus1_real;
damp1_imag = damp.kraus1_imag;
auto &dephase = rc.one_qubit_dephasing_kraus[gate.qubits[0]];
dephase0_real = dephase.kraus0_real;
dephase0_imag = dephase.kraus0_imag;
dephase1_real = dephase.kraus1_real;
dephase1_imag = dephase.kraus1_imag;
}
else if (gate.qubits.size() == 2) {
auto &damp = rc.two_qubit_damping_kraus[gate.qubits[0]];
damp0_real = damp.kraus0_real;
damp0_imag = damp.kraus0_imag;
damp1_real = damp.kraus1_real;
damp1_imag = damp.kraus1_imag;
auto &dephase = rc.two_qubit_dephasing_kraus[gate.qubits[0]];
dephase0_real = dephase.kraus0_real;
dephase0_imag = dephase.kraus0_imag;
dephase1_real = dephase.kraus1_real;
dephase1_imag = dephase.kraus1_imag;
auto &damp2 = rc.two_qubit_damping_kraus[gate.qubits[1]];
damp0_real2 = damp.kraus0_real;
damp0_imag2 = damp.kraus0_imag;
damp1_real2 = damp.kraus1_real;
damp1_imag2 = damp.kraus1_imag;
auto &dephase2 = rc.two_qubit_dephasing_kraus[gate.qubits[1]];
dephase0_real2 = dephase.kraus0_real;
dephase0_imag2 = dephase.kraus0_imag;
dephase1_real2 = dephase.kraus1_real;
dephase1_imag2 = dephase.kraus1_imag;
}
else assert(false, "Bad Gate Number.");
double r0 = (*rng)();
double r1 = (*rng)();
/* debug_info_s(Before Kraus);
debug_output_state;
debug_display(r0);
debug_display(r1);*/
state_manipulator<qtraits>::perform_kraus(real, imag, size, damp0_real, damp0_imag,
damp1_real, damp1_imag, gate.qubits[0], r0);
state_manipulator<qtraits>::perform_kraus(real, imag, size, dephase0_real, dephase0_imag,
dephase1_real, dephase1_imag, gate.qubits[0], r1);
if (gate.type == GateType::I) {
free(damp0_real);
free(damp0_imag);
free(damp1_real);
free(damp1_imag);
free(dephase0_real);
free(dephase0_imag);
free(dephase1_real);
free(dephase1_imag);
}
if (gate.qubits.size() == 2) {
double p0 = (*rng)();
double p1 = (*rng)();
state_manipulator<qtraits>::perform_kraus(real, imag, size, damp0_real2, damp0_imag2,
damp1_real2, damp1_imag2, gate.qubits[1], p0);
state_manipulator<qtraits>::perform_kraus(real, imag, size, dephase0_real2, dephase0_imag2,
dephase1_real2, dephase1_imag2, gate.qubits[1], p1);
}
// coherent error
double p_bad_gate = (*rng)();
if (gate.type != GateType::I) {
if (gate.qubits.size() == 1) {
if (p_bad_gate < p1[gate.qubits[0]]) {
goto SkipGate;
}
}
else if (gate.qubits.size() == 2) {
if (p_bad_gate < p2[gate.qubits[0]][gate.qubits[1]]) {
goto SkipGate;
}
}
else assert(false, "Bad Gate Number.");
}
/*debug_info_s(After Kraus);
debug_output_state;*/
switch (gate.type) {
case GateType::RX:
state_manipulator<qtraits>::rx(real, imag, size, gate.argument, gate.qubits[0]);
break;
case GateType::RY:
state_manipulator<qtraits>::ry(real, imag, size, gate.argument, gate.qubits[0]);
break;
case GateType::RZ:
state_manipulator<qtraits>::rz(real, imag, size, gate.argument, gate.qubits[0]);
break;
case GateType::H:
state_manipulator<qtraits>::h(real, imag, size, gate.qubits[0]);
break;
case GateType::CNOT:
state_manipulator<qtraits>::cnot(real, imag, size, gate.qubits[0], gate.qubits[1]);
break;
case GateType::I:
break;
default:
assert(false, "Bad Type.");
}
SkipGate:;
/*debug_info_s(Last);
debug_output_state;
debug_pause;*/
}
}
static uint_t *simulate_N_noisy(size_t n, const RealCircuit<qtraits> &c, RandomEngine **rng) {
// first allocate n times memory
uint_t size = pow2(c.real_circuit.max_qubit);
fp_t *real_n = (fp_t*)malloc(n * size * sizeof(fp_t));
fp_t *imag_n = (fp_t*)malloc(n * size * sizeof(fp_t));
memset(real_n, 0, n * size * sizeof(fp_t));
memset(imag_n, 0, n * size * sizeof(fp_t));
uint_t *result = (uint_t*)malloc(n * sizeof(uint_t));
memset(result, 0, sizeof(uint_t)*n);
for (int i = 0; i < n; ++i) {
fp_t* real = &real_n[i * size];
fp_t* imag = &imag_n[i * size];
real[0] = 1;
run_real_circuit(c, real, imag, size, rng[i]);
fp_t randnum = (fp_t)(*rng[i])();
fp_t total_prob = 0;
for (uint_t j = 0; j < size; ++j) {
fp_t pi = real[j] * real[j] + imag[j] * imag[j];
total_prob += pi;
if (randnum < total_prob) {
result[i] = j; break;
}
}
}
free(real_n);
free(imag_n);
return result;
}
static void simulate_N_noisy(size_t n, const RealCircuit<qtraits>& c, RandomEngine** rng,
                             fp_t *real_n, fp_t *imag_n, uint_t* result) {
    // Same sampling as the allocating overload, but writes into caller-provided
    // buffers: real_n/imag_n must each hold n * 2^max_qubit entries, result n entries.
    uint_t size = pow2(c.real_circuit.max_qubit);
    memset(real_n, 0, n * size * sizeof(fp_t));
    memset(imag_n, 0, n * size * sizeof(fp_t));
    for (size_t i = 0; i < n; ++i) {
        fp_t* real = &real_n[i * size];
        fp_t* imag = &imag_n[i * size];
        real[0] = 1;  // start in |0...0>
        run_real_circuit(c, real, imag, size, rng[i]);
        fp_t randnum = (fp_t)(*rng[i])();
        fp_t total_prob = 0;
        // BUG FIX: result[i] was never initialized in this overload (no memset of
        // result here), so a floating-point miss of every probability bucket left
        // garbage in the output. Default to the last index, the correct
        // inverse-CDF fallback when rounding makes total_prob end below randnum.
        result[i] = size - 1;
        for (uint_t j = 0; j < size; ++j) {
            fp_t pi = real[j] * real[j] + imag[j] * imag[j];
            total_prob += pi;
            if (randnum < total_prob) {
                result[i] = j;
                break;
            }
        }
    }
}
static uint_t *simulate_N_threads_noisy(size_t n, const RealCircuit<qtraits> &c, RandomEngine **rng) {
    // Parallel version of simulate_N_noisy: each OpenMP thread simulates and
    // samples independent shots. rng[i] must only be used by shot i, so the
    // per-iteration work is data-race free. Caller owns and frees the result.
    uint_t size = pow2(c.real_circuit.max_qubit);
    fp_t *real_n = (fp_t*)malloc(n * size * sizeof(fp_t));
    fp_t *imag_n = (fp_t*)malloc(n * size * sizeof(fp_t));
    memset(real_n, 0, n * size * sizeof(fp_t));
    memset(imag_n, 0, n * size * sizeof(fp_t));
    uint_t *result = (uint_t*)malloc(n * sizeof(uint_t));
    // signed loop index kept for OpenMP 2.x canonical-loop compatibility
    #pragma omp parallel for
    for (int i = 0; i < (int)n; ++i) {
        fp_t* real = &real_n[i * size];
        fp_t* imag = &imag_n[i * size];
        real[0] = 1;  // start in |0...0>
        run_real_circuit(c, real, imag, size, rng[i]);
        fp_t randnum = (fp_t)(*rng[i])();
        fp_t total_prob = 0;
        // BUG FIX: the memset of result was commented out, so when floating-point
        // rounding made the accumulated probability stay below randnum, result[i]
        // was returned uninitialized. Default to the last index (the correct
        // inverse-CDF fallback) before scanning.
        result[i] = size - 1;
        for (uint_t j = 0; j < size; ++j) {
            fp_t pi = real[j] * real[j] + imag[j] * imag[j];
            total_prob += pi;
            if (randnum < total_prob) {
                result[i] = j;
                break;
            }
        }
    }
    free(real_n);
    free(imag_n);
    return result;
}
static void simulate_N_threads_noisy(size_t n, const RealCircuit<qtraits>& c, RandomEngine** rng,
                                     fp_t* real_n, fp_t* imag_n, uint_t *result) {
    // Parallel sampling into caller-provided buffers: real_n/imag_n must each hold
    // n * 2^max_qubit entries, result n entries. rng[i] is only touched by shot i.
    uint_t size = pow2(c.real_circuit.max_qubit);
    memset(real_n, 0, n * size * sizeof(fp_t));
    memset(imag_n, 0, n * size * sizeof(fp_t));
    // signed loop index kept for OpenMP 2.x canonical-loop compatibility
    #pragma omp parallel for
    for (int i = 0; i < (int)n; ++i) {
        fp_t* real = &real_n[i * size];
        fp_t* imag = &imag_n[i * size];
        real[0] = 1;  // start in |0...0>
        run_real_circuit(c, real, imag, size, rng[i]);
        fp_t randnum = (fp_t)(*rng[i])();
        fp_t total_prob = 0;
        // BUG FIX: result[i] was left uninitialized whenever the draw missed every
        // probability bucket due to rounding; default to the last index, the
        // correct inverse-CDF fallback.
        result[i] = size - 1;
        for (uint_t j = 0; j < size; ++j) {
            fp_t pi = real[j] * real[j] + imag[j] * imag[j];
            total_prob += pi;
            if (randnum < total_prob) {
                result[i] = j;
                break;
            }
        }
    }
}
};
/* simulator modes */
constexpr bool noisy = true;
constexpr bool noise_free = false;
constexpr bool single_thread = false;
constexpr bool multi_threads = true;
ns_end |
VerletClusterLists.h | /**
* @file VerletClusterLists.h
* @author nguyen
* @date 14.10.18
*/
#pragma once
#include <cmath>
#include "autopas/cells/FullParticleCell.h"
#include "autopas/containers/CompatibleTraversals.h"
#include "autopas/containers/ParticleContainer.h"
#include "autopas/containers/verletClusterLists/VerletClusterMaths.h"
#include "autopas/iterators/ParticleIterator.h"
#include "autopas/utils/ArrayMath.h"
#include "autopas/utils/inBox.h"
namespace autopas {
template <class Particle>
class VerletClustersTraversalInterface;
/**
* Particles are divided into clusters.
* The VerletClusterLists class uses neighborhood lists for each cluster
* to calculate pairwise interactions of particles.
* It is optimized for a constant, i.e. particle independent, cutoff radius of
* the interaction.
* @tparam Particle
*/
template <class Particle>
class VerletClusterLists : public ParticleContainer<Particle, FullParticleCell<Particle>> {
/**
* the index type to access the particle cells
*/
typedef VerletClusterMaths::index_t index_t;
public:
/**
* Constructor of the VerletClusterLists class.
* The neighbor lists are built using an estimated density.
* The box is divided into cuboids with roughly the
* same side length.
* @param boxMin the lower corner of the domain
* @param boxMax the upper corner of the domain
* @param cutoff the cutoff radius of the interaction
* @param skin the skin radius
* @param clusterSize size of clusters
*/
VerletClusterLists(const std::array<double, 3> boxMin, const std::array<double, 3> boxMax, double cutoff,
double skin = 0, int clusterSize = 4)
: ParticleContainer<Particle, FullParticleCell<Particle>>(boxMin, boxMax, cutoff, skin),
_clusterSize(clusterSize),
_numClusters(0),
_boxMin(boxMin),
_boxMax(boxMax),
_skin(skin),
_cutoff(cutoff),
_neighborListIsNewton3(false),
// interaction length is cutoff + skin; the squared value is cached for distance tests
_interactionLengthSqr((cutoff + skin) * (cutoff + skin)) {
// Build the (still empty) grid structure once so all internal arrays are sized.
// Newton3 stays off until the first rebuildNeighborLists() call from a traversal.
rebuild(false);
}
/// Identifies this container implementation for the container selector.
ContainerOption getContainerType() override { return ContainerOption::verletClusterLists; }
/// Applies the given pairwise traversal to all clusters of this container.
/// The traversal must implement VerletClustersTraversalInterface; otherwise an exception is raised.
void iteratePairwise(TraversalInterface *traversal) override {
AutoPasLog(debug, "Using traversal {}.", utils::StringUtils::to_string(traversal->getTraversalType()));
// Hand the traversal a reference to this container before running it.
auto *traversalInterface = dynamic_cast<VerletClustersTraversalInterface<Particle> *>(traversal);
if (traversalInterface) {
traversalInterface->setClusterLists(*this);
} else {
autopas::utils::ExceptionHandler::exception(
"Trying to use a traversal of wrong type in VerletClusterLists::iteratePairwise. TraversalID: {}",
traversal->getTraversalType());
}
// Fixed traversal lifecycle: init -> traverse -> end.
traversal->initTraversal();
traversal->traverseParticlePairs();
traversal->endTraversal();
}
/**
* @copydoc VerletLists::addParticle()
*/
void addParticle(Particle &p) override {
// Add particle to an arbitrary grid (index 0); the lists are rebuilt anyway,
// which sorts every particle into its proper grid.
_clusters[0].addParticle(p);
}
/**
* @copydoc VerletLists::addHaloParticle()
*/
void addHaloParticle(Particle &haloParticle) override {
// Halo handling is not supported by this container yet; fail loudly instead of silently dropping.
autopas::utils::ExceptionHandler::exception("VerletClusterLists.addHaloParticle not yet implemented.");
}
/// Not supported yet; always throws. @throws std::runtime_error
bool updateHaloParticle(Particle &haloParticle) override { throw std::runtime_error("not yet implemented"); }
/**
* @copydoc VerletLists::deleteHaloParticles
*/
void deleteHaloParticles() override {
// quick and dirty: iterate over all particles and delete halo particles
// @todo: make this proper
for (auto iter = this->begin(IteratorBehavior::haloOnly); iter.isValid(); ++iter) {
// NOTE(review): begin() here ignores its behavior argument (see begin() below),
// so the isOwned() check is what actually filters for halo particles.
if (not iter->isOwned()) {
iter.deleteCurrentParticle();
}
}
}
/**
* @copydoc VerletLists::updateContainer()
*/
AUTOPAS_WARN_UNUSED_RESULT
std::vector<Particle> updateContainer() override {
AutoPasLog(debug, "updating container");
// first delete all particles
this->deleteHaloParticles();
// next find invalid particles: owned particles that left the domain box.
// They are removed from the container and handed back to the caller.
std::vector<Particle> invalidParticles;
/// @todo: parallelize
for (auto iter = this->begin(IteratorBehavior::ownedOnly); iter.isValid(); ++iter) {
if (not utils::inBox(iter->getR(), _boxMin, _boxMax)) {
invalidParticles.push_back(*iter);
iter.deleteCurrentParticle();
}
}
return invalidParticles;
}
bool isContainerUpdateNeeded() override {
// Not implemented: report via the exception handler, then return a defensive default.
autopas::utils::ExceptionHandler::exception("VerletClusterLists.isContainerUpdateNeeded not yet implemented");
return false;
}
/// Exposes the grid dimensions so the traversal selector can pick a compatible traversal.
TraversalSelectorInfo getTraversalSelectorInfo() override { return TraversalSelectorInfo(_cellsPerDim); }
/// Iterator over all particles stored in the grids.
/// NOTE(review): the behavior argument is ignored here — the iterator always visits
/// every particle regardless of halo/owned filtering; confirm against callers.
ParticleIteratorWrapper<Particle> begin(IteratorBehavior behavior = IteratorBehavior::haloAndOwned) override {
return ParticleIteratorWrapper<Particle>(
new internal::ParticleIterator<Particle, FullParticleCell<Particle>>(&this->_clusters));
}
ParticleIteratorWrapper<Particle> getRegionIterator(
const std::array<double, 3> &lowerCorner, const std::array<double, 3> &higherCorner,
IteratorBehavior behavior = IteratorBehavior::haloAndOwned) override {
// @todo implement this if bounding boxes are here
// Fails via the exception handler; the returned wrapper is only reached if the
// handler does not throw.
autopas::utils::ExceptionHandler::exception("VerletClusterLists.getRegionIterator not yet implemented.");
return ParticleIteratorWrapper<Particle>();
}
/// Rebuilds grids, clusters and neighbor lists; newton3 mode is taken from the traversal.
void rebuildNeighborLists(TraversalInterface *traversal) override { rebuild(traversal->getUseNewton3()); }
/**
* Helper method to iterate over all clusters.
* @tparam LoopBody The type of the lambda to execute for all clusters.
* @tparam inParallel If the iteration should be executed in parallel or sequential. See traverseClustersParallel()
* for thread safety.
* @param loopBody The lambda to execute for all clusters. Parameters given are Particle* clusterStart, int
* clusterSize, std::vector<Particle*> clusterNeighborList.
*/
template <bool inParallel, class LoopBody>
void traverseClusters(LoopBody &&loopBody) {
// Compile-time flag selects the parallel or sequential implementation;
// both invoke loopBody(clusterStart, clusterSize, clusterNeighborList) per cluster.
if (inParallel) {
traverseClustersParallel<LoopBody>(std::forward<LoopBody>(loopBody));
} else {
traverseClustersSequential<LoopBody>(std::forward<LoopBody>(loopBody));
}
}
/// Total number of particles summed over all grids (towers) of the container.
unsigned long getNumParticles() override {
  unsigned long total = 0;
  for (auto &grid : _clusters) {
    total += grid.numParticles();
  }
  return total;
}
/**
* Returns the ClusterIndexMap for usage in the traversals of this container.
* @return the ClusterIndexMap.
*/
const auto &getClusterIndexMap() const { return _clusterIndexMap; }
/**
* Returns the number of clusters in this container.
* @return The number of clusters in this container.
*/
auto getNumClusters() const { return _numClusters; }
/**
* Returns the neighbor lists of this container.
* @return the neighbor lists of this container.
*/
const auto &getNeighborLists() const { return _neighborLists; }
/**
* Returns the grid side length of the grids in the container.
* @return the grid side length of the grids in the container.
*/
auto getGridSideLength() const { return _gridSideLength; }
/**
* Returns the number of grids per dimension on the container.
* @return the number of grids per dimension on the container.
*/
auto getCellsPerDimension() const { return _cellsPerDim; }
/**
* Returns the 2D grid for the XY-plane of this container that defines the cluster towers.
* @return the grids of this container for usage in traversals.
*/
auto &getGrids() { return _clusters; }
/**
* Returns the number of particles in each cluster.
* @return the number of particles in each cluster.
*/
auto getClusterSize() const { return _clusterSize; }
protected:
/**
* Helper method to sequentially iterate over all clusters.
* @tparam LoopBody The type of the lambda to execute for all clusters.
* @param loopBody The lambda to execute for all clusters. Parameters given are Particle* clusterStart, index_t
* clusterSize, std::vector<Particle*> clusterNeighborList.
*/
template <class LoopBody>
void traverseClustersSequential(LoopBody &&loopBody) {
// Visit every xy-grid, then every full cluster inside that grid.
for (index_t x = 0; x < _cellsPerDim[0]; x++) {
for (index_t y = 0; y < _cellsPerDim[1]; y++) {
index_t index = VerletClusterMaths::index1D(x, y, _cellsPerDim);
auto &grid = _clusters[index];
auto &gridNeighborList = _neighborLists[index];
// After padClusters() every grid size is a multiple of _clusterSize,
// so this division covers all clusters.
const index_t numClustersInGrid = grid.numParticles() / _clusterSize;
for (index_t clusterInGrid = 0; clusterInGrid < numClustersInGrid; clusterInGrid++) {
Particle *iClusterStart = &grid[clusterInGrid * _clusterSize];
auto &clusterNeighborList = gridNeighborList[clusterInGrid];
loopBody(iClusterStart, _clusterSize, clusterNeighborList);
}
}
}
}
/**
* Helper method to iterate over all clusters in parallel.
*
* It is always safe to modify the particles in the cluster that is passed to the given loop body. However, when
* modifying particles from other clusters, the caller has to make sure that no data races occur. Particles must not
* be added or removed during the traversal.
* @tparam LoopBody The type of the lambda to execute for all clusters.
* @param loopBody The lambda to execute for all clusters. Parameters given are Particle* clusterStart, index_t
* clusterSize, std::vector<Particle*> clusterNeighborList.
*/
template <class LoopBody>
void traverseClustersParallel(LoopBody &&loopBody) {
const index_t endX = _cellsPerDim[0];
const index_t endY = _cellsPerDim[1];
#if defined(AUTOPAS_OPENMP)
// @todo: find sensible chunksize
#pragma omp parallel for schedule(dynamic) collapse(2)
#endif
// Grids are distributed over threads; each grid (and thus each cluster)
// is only touched by one thread, see thread-safety note in the doc comment.
for (index_t x = 0; x < endX; x++) {
for (index_t y = 0; y < endY; y++) {
index_t index = VerletClusterMaths::index1D(x, y, _cellsPerDim);
auto &grid = _clusters[index];
auto &gridNeighborList = _neighborLists[index];
const index_t numClustersInGrid = grid.numParticles() / _clusterSize;
for (index_t clusterInGrid = 0; clusterInGrid < numClustersInGrid; clusterInGrid++) {
Particle *iClusterStart = &grid[clusterInGrid * _clusterSize];
auto &clusterNeighborList = gridNeighborList[clusterInGrid];
loopBody(iClusterStart, _clusterSize, clusterNeighborList);
}
}
}
}
/**
* Recalculate grids and clusters, build verlet lists and pad clusters.
* @param useNewton3 Whether everything should be built using newton 3 or not.
*/
void rebuild(bool useNewton3) {
// Pull every particle out of the old grid structure; the grids are re-derived below.
std::vector<Particle> invalidParticles = collectParticlesAndClearClusters();
auto boxSize = ArrayMath::sub(_boxMax, _boxMin);
_gridSideLength = estimateOptimalGridSideLength(invalidParticles.size(), boxSize);
_gridSideLengthReciprocal = 1 / _gridSideLength;
_cellsPerDim = calculateCellsPerDim(boxSize);
// _cellsPerDim[2] is always 1
index_t numCells = _cellsPerDim[0] * _cellsPerDim[1];
// resize to number of grids
_clusters.resize(numCells);
_neighborLists.resize(numCells);
sortParticlesIntoClusters(invalidParticles);
// sort by last dimension and reserve space for dummy particles
for (auto &cluster : _clusters) {
cluster.sortByDim(2);
size_t size = cluster.numParticles();
size_t rest = size % _clusterSize;
if (rest > 0) cluster.reserve(size + (_clusterSize - rest));
}
clearNeighborLists();
// Order matters: the index map must exist before updateVerletLists(), because
// newton3 list building compares cluster indices from the map.
_numClusters = buildClusterIndexMap();
updateVerletLists(useNewton3);
// fill last cluster with dummy particles, such that each cluster is a multiple of _clusterSize
padClusters();
}
/**
* Takes all particles from all clusters and returns them. Clusters are cleared.
* @return All particles in the container.
*/
std::vector<Particle> collectParticlesAndClearClusters() {
// Copies every particle (including any padding dummies still present) out of
// the grids, then empties each grid.
std::vector<Particle> invalidParticles;
for (auto &cluster : _clusters) {
for (auto it = cluster.begin(); it.isValid(); ++it) {
invalidParticles.push_back(*it);
}
cluster.clear();
}
return invalidParticles;
}
/**
* Estimates the optimal grid side length.
* @param numParticles The number of particles in the container.
* @param boxSize The size of the domain.
* @return an estimated optimal grid side length.
*/
/// Estimates the optimal grid side length so that a grid cell holds roughly
/// _clusterSize particles at the estimated (uniform) density.
/// @param numParticles The number of particles in the container.
/// @param boxSize The size of the domain.
/// @return an estimated optimal grid side length.
virtual double estimateOptimalGridSideLength(size_t numParticles, std::array<double, 3> boxSize) const {
  // Empty container: one tower spanning the whole xy-plane.
  if (numParticles == 0) {
    return std::max(boxSize[0], boxSize[1]);
  }
  const double volume = boxSize[0] * boxSize[1] * boxSize[2];
  const double density = numParticles / volume;
  return std::cbrt(_clusterSize / density);
}
/**
* Calculates the cells per dimension in the container using the _gridSideLengthReciprocal.
* @param boxSize the size of the domain.
* @return the cells per dimension in the container.
*/
std::array<index_t, 3> calculateCellsPerDim(std::array<double, 3> boxSize) const {
std::array<index_t, 3> cellsPerDim{};
// Only x and y are gridded; z is handled by sorting/clustering inside each tower.
for (int d = 0; d < 2; d++) {
cellsPerDim[d] = static_cast<index_t>(std::ceil(boxSize[d] * _gridSideLengthReciprocal));
// at least one cell
cellsPerDim[d] = std::max(cellsPerDim[d], 1ul);
}
cellsPerDim[2] = 1ul;
return cellsPerDim;
}
/**
* Sorts all passed particles in the appropriate clusters.
* @param particles The particles to sort in the clusters.
*/
void sortParticlesIntoClusters(std::vector<Particle> &particles) {
for (auto &particle : particles) {
// NOTE(review): particles outside the box are silently dropped here — verify
// this is intended (out-of-box particles are returned by updateContainer()).
if (utils::inBox(particle.getR(), _boxMin, _boxMax)) {
auto index = get1DIndexOfPosition(particle.getR());
_clusters[index].addParticle(particle);
}
}
}
/**
* Clears all neighbor lists.
*/
/// Empties every per-grid neighbor list; the outer vector keeps its size.
void clearNeighborLists() {
  for (std::size_t i = 0; i < _neighborLists.size(); ++i) {
    _neighborLists[i].clear();
  }
}
/**
* Update the verlet lists.
*
* @param useNewton3 If newton 3 should be used to build the neighbor lists or not. If true, only saves neighbor
* clusters that have a higher index than the current cluster. (@see buildClusterIndexMap())
*/
void updateVerletLists(bool useNewton3) {
_neighborListIsNewton3 = useNewton3;
// Number of neighboring grids (in each direction) that can contain particles
// within interaction length.
const int boxRange = static_cast<int>(std::ceil((_cutoff + _skin) * _gridSideLengthReciprocal));
const int gridMaxX = _cellsPerDim[0] - 1;
const int gridMaxY = _cellsPerDim[1] - 1;
// for all grids
for (int yi = 0; yi <= gridMaxY; yi++) {
for (int xi = 0; xi <= gridMaxX; xi++) {
auto &iGrid = _clusters[VerletClusterMaths::index1D(xi, yi, _cellsPerDim)];
// calculate number of full clusters and rest
index_t iSize = iGrid.numParticles() / _clusterSize;
int iRest = iGrid.numParticles() % _clusterSize;
// Clamp the neighbor-grid window to the domain.
const int minX = std::max(xi - boxRange, 0);
const int minY = std::max(yi - boxRange, 0);
const int maxX = std::min(xi + boxRange, gridMaxX);
const int maxY = std::min(yi + boxRange, gridMaxY);
auto &iNeighbors = _neighborLists[VerletClusterMaths::index1D(xi, yi, _cellsPerDim)];
// One neighbor list per cluster; a trailing partial cluster gets its own list.
if (iRest > 0)
iNeighbors.resize(iSize + 1);
else
iNeighbors.resize(iSize);
addClustersOfNeighborGridsAsNeighborsIfInRange(iGrid, iSize, iRest, iNeighbors, minX, maxX, minY, maxY, xi, yi);
}
}
}
/**
* Iterates over neighbor grids of the i-th grid and adds all clusters in them that are within the cutoff radius to
* the neighbor list of the clusters in the i-th grid.
* @param iGrid The i-th grid.
* @param iSize The number of full clusters in the i-th grid.
* @param iRest If the last cluster is not full: The number of particles in the last cluster. 0 otherwise.
* @param iNeighbors The neighbor list of the i-th grid.
* @param minX
* @param maxX
* @param minY
* @param maxY
* @param xi The x-index of the i-th grid.
* @param yi the y-index of the i-th grid.
*/
void addClustersOfNeighborGridsAsNeighborsIfInRange(FullParticleCell<Particle> &iGrid, index_t iSize, int iRest,
std::vector<std::vector<Particle *>> &iNeighbors, const int minX,
const int maxX, const int minY, const int maxY, const int xi,
const int yi) {
// for all neighbor grids
for (int yj = minY; yj <= maxY; yj++) {
// Minimum possible y-distance between grids: zero for the same or adjacent
// columns, otherwise (gap - 1) full grid widths.
double distY = std::max(0, std::abs(yi - yj) - 1) * _gridSideLength;
for (int xj = minX; xj <= maxX; xj++) {
double distX = std::max(0, std::abs(xi - xj) - 1) * _gridSideLength;
// calculate distance in xy-plane and skip if already longer than cutoff
double distXYsqr = distX * distX + distY * distY;
if (distXYsqr <= _interactionLengthSqr) {
auto &jGrid = _clusters[VerletClusterMaths::index1D(xj, yj, _cellsPerDim)];
// calculate number of full clusters and rest
const index_t jSize = jGrid.numParticles() / _clusterSize;
const int jRest = jGrid.numParticles() % _clusterSize;
// for all clusters in the i-th grid
for (index_t zi = 0; zi < iSize; zi++) {
addAllJClustersAsNeighborIfInRange(iGrid, zi, _clusterSize, iNeighbors, jGrid, jSize, jRest, distXYsqr);
}
// special case: last cluster of iGrid not full
if (iRest > 0) {
addAllJClustersAsNeighborIfInRange(iGrid, iSize, iRest, iNeighbors, jGrid, jSize, jRest, distXYsqr);
}
}
}
}
}
/**
* Adds all clusters in jGrid that are within the cutoff radius to the neighbor list of the given cluster in iGrid
* (iClusterIndex).
* @param iGrid The i-th grid.
* @param iClusterIndex The index of the cluster to work on in the i-th grid.
* @param iClusterSize The size of the cluster with index iClusterIndex in the i-th grid.
* @param iNeighbors The neighbor list of the i-th grid.
* @param jGrid The j-th grid.
* @param jSize The number of full clusters in the j-th grid.
* @param jRest If the last cluster is not full: The number of particles in the last cluster. 0 otherwise.
* @param distXYsqr The distance between the i-th grid and the j-th grid in the xy-plane.
*/
void addAllJClustersAsNeighborIfInRange(FullParticleCell<Particle> &iGrid, index_t iClusterIndex, int iClusterSize,
std::vector<std::vector<Particle *>> &iNeighbors,
FullParticleCell<Particle> &jGrid, index_t jSize, int jRest,
double distXYsqr) {
// bbox in z of iGrid
// Cluster start offsets always use the full _clusterSize stride; iClusterSize only
// shrinks the top of the bbox for a trailing partial cluster.
double iBBoxBot = iGrid[iClusterIndex * _clusterSize].getR()[2];
double iBBoxTop = iGrid[iClusterIndex * _clusterSize + iClusterSize - 1].getR()[2];
auto &iClusterNeighborList = iNeighbors[iClusterIndex];
Particle *iClusterStart = &iGrid[iClusterIndex * _clusterSize];
// iterate over full clusters of j-th grid.
for (index_t jClusterIndex = 0; jClusterIndex < jSize; jClusterIndex++) {
Particle *jClusterStart = &jGrid[jClusterIndex * _clusterSize];
// If newton 3 is used, only add clusters as neighbors that have a equal or higher index. Skip otherwise.
if (_neighborListIsNewton3 and _clusterIndexMap.at(iClusterStart) > _clusterIndexMap.at(jClusterStart)) continue;
addJClusterAsNeighborIfInRange(jGrid, jClusterStart, _clusterSize, iClusterNeighborList, distXYsqr, iBBoxBot,
iBBoxTop);
}
// special case: last cluster not full
if (jRest > 0) {
Particle *jClusterStart = &jGrid[jSize * _clusterSize];
// If newton 3 is used, only add clusters as neighbors that have a equal or higher index. Skip otherwise.
if (not(_neighborListIsNewton3 and _clusterIndexMap.at(iClusterStart) > _clusterIndexMap.at(jClusterStart))) {
addJClusterAsNeighborIfInRange(jGrid, jClusterStart, jRest, iClusterNeighborList, distXYsqr, iBBoxBot,
iBBoxTop);
}
}
}
/**
* Adds the given cluster in jGrid to the given neighbor list (iClusterNeighborList), if it is within the cutoff
* radius.
* @param jGrid The j-th grid.
* @param jClusterStart A pointer to the start of the cluster to work on in the j-th grid.
* @param jClusterSize The size of the cluster to work on in the j-th grid.
* @param iClusterNeighborList The neighbor list of the cluster in the i-th grid to fill the neighbors for.
* @param distXYsqr The distance between the i-th grid and the j-th grid in the xy-plane.
* @param iBBoxBot The bottom z-coordinate of the cluster in the i-th grid.
* @param iBBoxTop The top z-coordinate of the cluster in the i-th grid.
*/
void addJClusterAsNeighborIfInRange(FullParticleCell<Particle> &jGrid, Particle *jClusterStart, int jClusterSize,
std::vector<Particle *> &iClusterNeighborList, double distXYsqr, double iBBoxBot,
double iBBoxTop) {
// bbox in z of jGrid
// Clusters are z-sorted (see rebuild), so first/last particle give the z-extent.
double jBBoxBot = jClusterStart->getR()[2];
double jBBoxTop = (jClusterStart + (jClusterSize - 1))->getR()[2];
double distZ = bboxDistance(iBBoxBot, iBBoxTop, jBBoxBot, jBBoxTop);
// Compare the squared 3D bbox distance against (cutoff + skin)^2.
if (distXYsqr + distZ * distZ <= _interactionLengthSqr) {
iClusterNeighborList.push_back(jClusterStart);
}
}
/**
* Pad clusters with dummy particles
* until each cluster is a multiple of _clusterSize.
* Useful for SIMD vectorization.
*/
void padClusters() {
for (index_t x = 0; x < _cellsPerDim[0]; x++) {
for (index_t y = 0; y < _cellsPerDim[1]; y++) {
auto &grid = _clusters[VerletClusterMaths::index1D(x, y, _cellsPerDim)];
index_t rest = grid.numParticles() % _clusterSize;
if (rest > 0) {
for (int i = rest; i < _clusterSize; i++) {
Particle p = Particle();
// Place dummies far outside the box (z > 2 * _boxMax[2]) and spread apart
// so they can never fall within any interaction length.
p.setR({2 * x * _cutoff, 2 * y * _cutoff, 2 * _boxMax[2] + 2 * i * _cutoff});
grid.addParticle(p);
}
}
}
}
}
/**
* Calculates the distance of two bounding boxes in one dimension.
* @param min1 minimum coordinate of first bbox in tested dimension
* @param max1 maximum coordinate of first bbox in tested dimension
* @param min2 minimum coordinate of second bbox in tested dimension
* @param max2 maximum coordinate of second bbox in tested dimension
* @return distance
*/
/// Gap between the 1D intervals [min1, max1] and [min2, max2]; 0 if they overlap or touch.
/// @param min1 minimum coordinate of first bbox in tested dimension
/// @param max1 maximum coordinate of first bbox in tested dimension
/// @param min2 minimum coordinate of second bbox in tested dimension
/// @param max2 maximum coordinate of second bbox in tested dimension
/// @return distance
inline double bboxDistance(const double min1, const double max1, const double min2, const double max2) const {
  const double gapAbove = min2 - max1;  // > 0 iff box1 lies entirely below box2
  const double gapBelow = min1 - max2;  // > 0 iff box1 lies entirely above box2
  return std::max(std::max(gapAbove, gapBelow), 0.0);
}
/**
* Gets the 1d grid index containing a particle in given position.
* @param pos the position of the particle
* @return the index of the grid
*/
inline index_t get1DIndexOfPosition(const std::array<double, 3> &pos) const {
std::array<index_t, 2> cellIndex{};
for (int dim = 0; dim < 2; dim++) {
// NOTE(review): the +1 offset on the floored cell coordinate looks deliberate
// (it shifts positions one grid to the right before clamping) — confirm against
// VerletClusterMaths::index1D's expected indexing convention.
const long int value = (static_cast<long int>(floor((pos[dim] - _boxMin[dim]) * _gridSideLengthReciprocal))) + 1l;
// Clamp into the valid index range [0, _cellsPerDim[dim] - 1].
const index_t nonnegativeValue = static_cast<index_t>(std::max(value, 0l));
const index_t nonLargerValue = std::min(nonnegativeValue, _cellsPerDim[dim] - 1);
cellIndex[dim] = nonLargerValue;
/// @todo this is a sanity check to prevent doubling of particles, but
/// could be done better! e.g. by border and flag manager
if (pos[dim] >= _boxMax[dim]) {
cellIndex[dim] = _cellsPerDim[dim] - 1;
} else if (pos[dim] < _boxMin[dim]) {
cellIndex[dim] = 0;
}
}
return VerletClusterMaths::index1D(cellIndex[0], cellIndex[1], _cellsPerDim);
}
/**
* Builds the _clusterIndexMap to be up to date with _clusters.
*
* Every cluster gets an index assigned. The indices are given in a way so that the VerletClustersColoringTraversal
* works as easy as possible with newton 3. The newton 3 neighbor list just has to only save neighbors with a higher
* index, and there will be no data races.
*
* For each cluster now holds (with x-axis as left <=> right, y-axis <=> as top <=> bottom):
* - The indices of all clusters of the three color cells above and the color cell to the left are lower.
* - The indices of all clusters of the three color cells below and the color cell to the right are higher.
* - For all grids of the same color cell holds:
* - The indices of all clusters of the three grids above and the grids to the left are lower.
* - The indices of all clusters of the three grids below and the grids to the right are higher.
* - For all clusters in the same grid holds:
* - The indices of all clusters with a lower z-coordinate than the current cluster are lower.
* - The indices of all clusters with a higher z-coordinate than the current cluster are higher.
*
* @return The number of clusters in the container.
*/
index_t buildClusterIndexMap() {
index_t nextFreeMapIndex = 0;
// A coloring cell covers enough grids to span the interaction length.
int gridsPerColoringCell = static_cast<int>(std::ceil((_cutoff + _skin) / _gridSideLength));
std::array<unsigned long, 3> coloringCellsPerDim{};
for (int i = 0; i < 3; i++) {
coloringCellsPerDim[i] =
static_cast<unsigned long>(std::ceil(_cellsPerDim[i] / static_cast<double>(gridsPerColoringCell)));
}
// Color cells are indexed row-major (y outer, x inner), giving the ordering
// guarantees documented above for the coloring traversal.
for (unsigned long yColorCell = 0; yColorCell < coloringCellsPerDim[1]; yColorCell++) {
for (unsigned long xColorCell = 0; xColorCell < coloringCellsPerDim[0]; xColorCell++) {
nextFreeMapIndex = indexColorCell(xColorCell, yColorCell, gridsPerColoringCell, nextFreeMapIndex);
}
}
return nextFreeMapIndex;
}
private:
/**
* Indexes all clusters of one color cell (inserts value into _clusterIndexMap) starting with currentMapIndex.
*
* The scheme follows the documentation from buildClusterIndexMap().
* @param xColorCell The x coordinate of the color cell.
* @param yColorCell The y coordinate of the color cell.
* @param gridsPerColoringCell The number of grids in x and y dimension of this color cell.
* @param currentMapIndex The first index to use.
* @return The next available index after this cell.
*/
index_t indexColorCell(unsigned long xColorCell, unsigned long yColorCell, int gridsPerColoringCell,
                       index_t currentMapIndex) {
  // Walk the grids of this color cell row-major and hand out consecutive indices
  // to their clusters (bottom-to-top within a grid), per buildClusterIndexMap().
  for (int yInner = 0; yInner < gridsPerColoringCell; yInner++) {
    for (int xInner = 0; xInner < gridsPerColoringCell; xInner++) {
      unsigned long y = yColorCell * gridsPerColoringCell + yInner;
      unsigned long x = xColorCell * gridsPerColoringCell + xInner;
      // Not every coloring cell has to have gridsPerColoringCell grids in every direction.
      if (x >= _cellsPerDim[0] or y >= _cellsPerDim[1]) {
        continue;
      }
      unsigned long gridIndex1D = VerletClusterMaths::index1D(x, y, _cellsPerDim);
      // BUG FIX: the identifier was corrupted to "¤tGrid" (mojibake of the HTML
      // entity for "&curren" inside "&currentGrid"), which does not compile.
      auto &currentGrid = _clusters[gridIndex1D];
      // Count full clusters plus one trailing partial cluster, if any.
      auto numClusters = currentGrid.numParticles() / _clusterSize;
      int rest = currentGrid.numParticles() % _clusterSize;
      if (rest > 0) numClusters++;
      for (unsigned long currentCluster = 0; currentCluster < numClusters; currentCluster++) {
        Particle *clusterStart = &currentGrid[currentCluster * _clusterSize];
        _clusterIndexMap[clusterStart] = currentMapIndex++;
      }
    }
  }
  return currentMapIndex;
}
private:
/**
* Neighbors of clusters for each grid. If it uses newton 3 is saved in _neighborListIsNewton3.
* If it uses newton 3: Only the neighbor clusters that have a higher index are saved. (@see _clusterIndexMap)
*/
std::vector<std::vector<std::vector<Particle *>>> _neighborLists;
/**
* internal storage, particles are split into a grid in xy-dimension
*/
std::vector<FullParticleCell<Particle>> _clusters;
/**
* The number of particles in a full cluster.
*/
int _clusterSize;
/**
* The number of clusters. This is not equal to _clusters.size(), as every grid might contain multiple clusters.
*/
index_t _numClusters;
/**
* Box min of the domain.
*/
std::array<double, 3> _boxMin;
/**
* Box max of the domain.
*/
std::array<double, 3> _boxMax;
/**
* Side length of xy-grid.
*/
double _gridSideLength{0.};
/**
* Reciprocal of _gridSideLength.
*/
double _gridSideLengthReciprocal{0.};
/**
* Dimensions of the 2D xy-grid.
*/
std::array<index_t, 3> _cellsPerDim{};
/**
* The skin radius.
*/
double _skin;
/**
* The cutoff.
*/
double _cutoff;
/**
* Specifies if the neighbor list uses newton 3 or not.
*/
bool _neighborListIsNewton3;
/**
* Maps indices to the starting pointers for each cluster. For the idea behind the assignment, @see
* buildClusterIndexMap().
*/
std::unordered_map<Particle *, index_t> _clusterIndexMap;
/**
* (_cutoff + _skin)^2.
*/
double _interactionLengthSqr;
};
} // namespace autopas
|
GB_unop__log_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__log_fc32_fc32)
// op(A') function: GB (_unop_tran__log_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = clogf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = clogf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = clogf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = clogf (Ax [p]) for every entry present in A (auto-generated kernel).
GrB_Info GB (_unop_apply__log_fc32_fc32)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// full/sparse case: all anz entries are present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = clogf (z) ; // complex natural log, single precision
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ; // entry not present in the bitmap
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = clogf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = clogf (A'): transpose and apply; the shared transpose template does the work.
GrB_Info GB (_unop_tran__log_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// The template expands using the GB_CAST_OP / GB_ATYPE / GB_CTYPE macros above.
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mxnet_op.h | /*!
* Copyright (c) 2017 by Contributors
* \file mxnet_op.h
* \brief
* \author Junyuan Xie
*/
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_
#include <mxnet/base.h>
#include <algorithm>
namespace mxnet {
namespace op {
namespace mxnet_op {
#ifdef __CUDA_ARCH__
__constant__ const float PI = 3.14159265358979323846;
#else
const float PI = 3.14159265358979323846;
using std::isnan;
#endif
/*!
 * \brief Launcher for elementwise kernels: invokes OP::Map(i, ...) for each
 * index i on device xpu.  Specialized below for cpu (and for gpu under
 * __CUDACC__).
 */
template<typename OP, typename xpu>
struct Kernel;
template<typename OP>
struct Kernel<OP, cpu> {
  /*!
   * \brief Run OP::Map(i, args...) for every i in [0, N) on the CPU.
   * \param s CPU stream (not used on this path)
   * \param N number of elements
   * \param args extra arguments forwarded to OP::Map
   */
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<cpu> *s, int N, Args... args) {
    // OpenMP parallelization is only enabled when MXNet is built without CUDA
#if (MXNET_USE_CUDA == 0)
#pragma omp parallel for
#endif
    for (int i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
  }
};
#ifdef __CUDACC__
/*!
 * \brief Generic CUDA kernel: a grid-stride loop where each thread applies
 * OP::Map to indices i, i + blockDim.x*gridDim.x, ... until N is covered.
 */
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, args...);
  }
}
template<typename OP>
struct Kernel<OP, gpu> {
  /*!
   * \brief Enqueue OP::Map over [0, N) on the GPU stream s.
   * \param s GPU stream the kernel is launched on
   * \param N number of elements
   * \param args extra arguments forwarded to OP::Map
   */
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
    using namespace mshadow::cuda;
    // enough blocks of kBaseThreadNum threads to cover N, capped at kMaxGridNum
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel<OP, Args...>
        <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
            N, args...);
  }
};
#endif  // __CUDACC__
/*!
 * \brief Operator request type switch: expands __VA_ARGS__ with
 * `const int ReqType` bound to the compile-time request constant that matches
 * the runtime value of req.  kNullOp expands to nothing; kWriteInplace is
 * handled as kWriteTo.
 */
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...)  \
  switch (req) {                                    \
  case kNullOp:                                     \
    break;                                          \
  case kWriteInplace:                               \
  case kWriteTo:                                    \
    {                                               \
      const int ReqType = kWriteTo;                 \
      {__VA_ARGS__}                                 \
    }                                               \
    break;                                          \
  case kAddTo:                                      \
    {                                               \
      const int ReqType = kAddTo;                   \
      {__VA_ARGS__}                                 \
    }                                               \
    break;                                          \
  default:                                          \
    break;                                          \
  }
/*!
 * \brief Assign val to out according to the request in Kernel::Launch:
 * kNullOp writes nothing, kWriteTo/kWriteInplace overwrite, and kAddTo
 * accumulates into out.
 * \param out the data to be assigned
 * \param req the assignment request
 * \param val the value to be assigned to out
 * \tparam OType output type
 * \tparam VType value type
 */
#define KERNEL_ASSIGN(out, req, val)  \
  {                                   \
    switch (req) {                    \
      case kNullOp:                   \
        break;                        \
      case kWriteTo:                  \
      case kWriteInplace:             \
        (out) = (val);                \
        break;                        \
      case kAddTo:                    \
        (out) += (val);               \
        break;                        \
      default:                        \
        break;                        \
    }                                 \
  }
/*!
 * \brief Elementwise clamp kernel: out[i] = datas[i] limited to the closed
 * range [a_min, a_max].  A NaN input compares false against both bounds and
 * therefore passes through unchanged.
 */
struct clip {
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const DType* datas,
                                  DType a_min, DType a_max) {
    const DType v = datas[i];
    out[i] = (v > a_max) ? a_max : ((v < a_min) ? a_min : v);
  }
};
/*!
 * \brief Gradient of clip: passes grad[i] through where datas[i] lies within
 * [a_min, a_max] and emits zero where the forward pass clipped.  A NaN input
 * compares false against both bounds, so its gradient passes through.
 */
struct clip_grad {
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const DType* grad, const DType* datas,
                                  DType a_min, DType a_max) {
    const DType v = datas[i];
    out[i] = (v > a_max || v < a_min) ? DType(0) : grad[i];
  }
};
#define REVERSE_MAX_DIM 10
/*!
 * \brief Kernel that copies src into dst with selected axes reversed.
 * NOTE(review): stride_ and trailing_ appear to encode, per reversed axis,
 * the axis length and the product of the trailing dimensions — verify the
 * exact layout against the callers that build these tables.
 */
struct reverse {
  // Map a linear index in src to the linear index in dst after reversing
  // nreversedim axes.
  MSHADOW_XINLINE static int ReverseIndex(index_t idx,
                                          index_t nreversedim,
                                          const index_t * stride_,
                                          const index_t * trailing_) {
    index_t outputIndex = idx;
    for (index_t i = 0; i < nreversedim; ++i) {
      // decompose the index around axis i, flip the coordinate x on that
      // axis, and reassemble
      const index_t low = outputIndex % trailing_[i];
      index_t high = outputIndex / trailing_[i];
      const index_t x = high%stride_[i];
      high /= stride_[i];
      outputIndex = (high*stride_[i] + stride_[i] - 1 - x)*trailing_[i] + low;
    }
    return outputIndex;
  }
#ifdef __CUDACC__
  template<typename DType>
  __device__ static void Map(int index, index_t nreversedim, const DType *src, DType *dst,
                             const index_t * stride_,
                             const index_t * trailing_) {
    // Stage the stride tables in shared memory; the first REVERSE_MAX_DIM
    // threads of the block perform the copy.
    // NOTE(review): this reads stride_[t]/trailing_[t] for every t <
    // REVERSE_MAX_DIM, not just t < nreversedim — assumes both arrays are
    // allocated with at least REVERSE_MAX_DIM elements; confirm at call sites.
    __shared__ index_t stride_share[REVERSE_MAX_DIM];
    __shared__ index_t trailing_share[REVERSE_MAX_DIM];
    if (threadIdx.x < REVERSE_MAX_DIM) {
      stride_share[threadIdx.x] = stride_[threadIdx.x];
      trailing_share[threadIdx.x] = trailing_[threadIdx.x];
    }
    __syncthreads();
    index_t new_idx = ReverseIndex(index, nreversedim, stride_share, trailing_share);
    dst[new_idx] = src[index];
  }
#else
  // CPU variant: same index mapping, reads the stride tables directly.
  template<typename DType>
  MSHADOW_XINLINE static void Map(int index, index_t nreversedim, const DType *src, DType *dst,
                                  const index_t * stride_,
                                  const index_t * trailing_) {
    index_t new_idx = ReverseIndex(index, nreversedim, stride_, trailing_);
    dst[new_idx] = src[index];
  }
#endif
};
} // namespace mxnet_op
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_MXNET_OP_H_
|
transform.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M %
% T R R A A NN N SS F O O R R MM MM %
% T RRRR AAAAA N N N SSS FFF O O RRRR M M M %
% T R R A A N NN SS F O O R R M M %
% T R R A A N N SSSSS F OOO R R M M %
% %
% %
% MagickCore Image Transform Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/memory_.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/resource_.h"
#include "MagickCore/resize.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o O r i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoOrientImage() adjusts an image so that its orientation is suitable for
% viewing (i.e. top-left orientation).
%
% The format of the AutoOrientImage method is:
%
% Image *AutoOrientImage(const Image *image,
% const OrientationType orientation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o orientation: Current image orientation.
%
% o exception: Return any errors or warnings in this structure.
%
*/
MagickExport Image *AutoOrientImage(const Image *image,
  const OrientationType orientation,ExceptionInfo *exception)
{
  Image
    *orient_image;

  /*
    Produce a copy of image transformed so that it displays correctly in
    top-left orientation; the result is tagged TopLeftOrientation.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (orientation == TopRightOrientation)
    orient_image=FlopImage(image,exception);
  else
    if (orientation == BottomRightOrientation)
      orient_image=RotateImage(image,180.0,exception);
    else
      if (orientation == BottomLeftOrientation)
        orient_image=FlipImage(image,exception);
      else
        if (orientation == LeftTopOrientation)
          orient_image=TransposeImage(image,exception);
        else
          if (orientation == RightTopOrientation)
            orient_image=RotateImage(image,90.0,exception);
          else
            if (orientation == RightBottomOrientation)
              orient_image=TransverseImage(image,exception);
            else
              if (orientation == LeftBottomOrientation)
                orient_image=RotateImage(image,270.0,exception);
              else
                {
                  /*
                    UndefinedOrientation, TopLeftOrientation, or any other
                    value: already upright, just clone.
                  */
                  orient_image=CloneImage(image,0,0,MagickTrue,exception);
                }
  if (orient_image != (Image *) NULL)
    orient_image->orientation=TopLeftOrientation;
  return(orient_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChopImage() removes a region of an image and collapses the image to occupy
% the removed portion.
%
% The format of the ChopImage method is:
%
%      Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o chop_info: Define the region of the image to chop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
  ExceptionInfo *exception)
{
#define ChopImageTag  "Chop/Image"

  CacheView
    *chop_view,
    *image_view;

  Image
    *chop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    extent;

  ssize_t
    y;

  /*
    Check chop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(chop_info != (RectangleInfo *) NULL);
  if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
      ((chop_info->y+(ssize_t) chop_info->height) < 0) ||
      (chop_info->x > (ssize_t) image->columns) ||
      (chop_info->y > (ssize_t) image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /*
    Clamp the chop rectangle to the image bounds.
  */
  extent=(*chop_info);
  if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
    extent.width=(size_t) ((ssize_t) image->columns-extent.x);
  if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
    extent.height=(size_t) ((ssize_t) image->rows-extent.y);
  if (extent.x < 0)
    {
      extent.width-=(size_t) (-extent.x);
      extent.x=0;
    }
  if (extent.y < 0)
    {
      extent.height-=(size_t) (-extent.y);
      extent.y=0;
    }
  /*
    The result is the source image minus the chopped rows and columns.
  */
  chop_image=CloneImage(image,image->columns-extent.width,image->rows-
    extent.height,MagickTrue,exception);
  if (chop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Extract chop image: first copy the rows above the chop region.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  chop_view=AcquireAuthenticCacheView(chop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,chop_image,extent.y,1)
#endif
  for (y=0; y < (ssize_t) extent.y; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Keep only the columns outside [extent.x, extent.x+extent.width).
      */
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          register ssize_t
            i;

          /*
            Copy every channel present in both images.
          */
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (chop_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(chop_image,channel,p[i],q);
          }
          q+=GetPixelChannels(chop_image);
        }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ChopImage)
#endif
        proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Extract chop image: now copy the rows below the chop region, shifted up
    by extent.height.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,chop_image,image->rows-(extent.y+extent.height),1)
#endif
  for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (chop_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(chop_image,channel,p[i],q);
          }
          q+=GetPixelChannels(chop_image);
        }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ChopImage)
#endif
        proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  chop_view=DestroyCacheView(chop_view);
  image_view=DestroyCacheView(image_view);
  chop_image->type=image->type;
  if (status == MagickFalse)
    chop_image=DestroyImage(chop_image);
  return(chop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C M Y K I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCMYKImages() consolidates separate C, M, Y, and K planes into a
% single image.
%
% The format of the ConsolidateCMYKImages method is:
%
%      Image *ConsolidateCMYKImages(const Image *images,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o images: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConsolidateCMYKImages(const Image *images,
  ExceptionInfo *exception)
{
  CacheView
    *cmyk_view,
    *image_view;

  Image
    *cmyk_image,
    *cmyk_images;

  register ssize_t
    j;

  ssize_t
    y;

  /*
    Consolidate separate C, M, Y, and K planes into a single image: each run
    of four images in the list becomes one CMYK image in the result list.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cmyk_images=NewImageList();
  for (j=0; j < (ssize_t) GetImageListLength(images); j+=4)
  {
    register ssize_t
      i;

    assert(images != (Image *) NULL);
    cmyk_image=CloneImage(images,0,0,MagickTrue,
      exception);
    if (cmyk_image == (Image *) NULL)
      break;
    if (SetImageStorageClass(cmyk_image,DirectClass,exception) == MagickFalse)
      break;
    (void) SetImageColorspace(cmyk_image,CMYKColorspace,exception);
    /*
      Plane i of the quadruple supplies channel i: 0=cyan, 1=magenta,
      2=yellow, 3=black.  Each channel value is the inverted intensity of
      the corresponding plane.
    */
    for (i=0; i < 4; i++)
    {
      image_view=AcquireVirtualCacheView(images,exception);
      cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
      for (y=0; y < (ssize_t) images->rows; y++)
      {
        register const Quantum
          *magick_restrict p;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
        q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
          exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          break;
        for (x=0; x < (ssize_t) images->columns; x++)
        {
          Quantum
            pixel;

          pixel=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
          switch (i)
          {
            case 0: SetPixelCyan(cmyk_image,pixel,q); break;
            case 1: SetPixelMagenta(cmyk_image,pixel,q); break;
            case 2: SetPixelYellow(cmyk_image,pixel,q); break;
            case 3: SetPixelBlack(cmyk_image,pixel,q); break;
            default: break;
          }
          p+=GetPixelChannels(images);
          q+=GetPixelChannels(cmyk_image);
        }
        if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
          break;
      }
      cmyk_view=DestroyCacheView(cmyk_view);
      image_view=DestroyCacheView(image_view);
      images=GetNextImageInList(images);
      if (images == (Image *) NULL)
        break;
    }
    /*
      NOTE(review): if the list ends before all four planes are seen, the
      partially filled cmyk_image is still appended here.
    */
    AppendImageToList(&cmyk_images,cmyk_image);
  }
  return(cmyk_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImage() extracts a region of the image starting at the offset defined
% by geometry. Region must be fully defined, and no special handling of
% geometry flags is performed.
%
% The format of the CropImage method is:
%
% Image *CropImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to crop with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry,
  ExceptionInfo *exception)
{
#define CropImageTag  "Crop/Image"

  CacheView
    *crop_view,
    *image_view;

  Image
    *crop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;

  RectangleInfo
    bounding_box,
    page;

  ssize_t
    y;

  /*
    Check crop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The crop is interpreted relative to the virtual canvas (image->page);
    fall back to the pixel dimensions when no page geometry is set.
  */
  bounding_box=image->page;
  if ((bounding_box.width == 0) || (bounding_box.height == 0))
    {
      bounding_box.width=image->columns;
      bounding_box.height=image->rows;
    }
  page=(*geometry);
  if (page.width == 0)
    page.width=bounding_box.width;
  if (page.height == 0)
    page.height=bounding_box.height;
  if (((bounding_box.x-page.x) >= (ssize_t) page.width) ||
      ((bounding_box.y-page.y) >= (ssize_t) page.height) ||
      ((page.x-bounding_box.x) > (ssize_t) image->columns) ||
      ((page.y-bounding_box.y) > (ssize_t) image->rows))
    {
      /*
        Crop is not within virtual canvas, return 1 pixel transparent image.
      */
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      crop_image=CloneImage(image,1,1,MagickTrue,exception);
      if (crop_image == (Image *) NULL)
        return((Image *) NULL);
      crop_image->background_color.alpha=(MagickRealType) TransparentAlpha;
      crop_image->alpha_trait=BlendPixelTrait;
      (void) SetImageBackgroundColor(crop_image,exception);
      crop_image->page=bounding_box;
      crop_image->page.x=(-1);
      crop_image->page.y=(-1);
      if (crop_image->dispose == BackgroundDispose)
        crop_image->dispose=NoneDispose;
      return(crop_image);
    }
  /*
    Translate the crop rectangle from virtual-canvas to pixel coordinates.
  */
  if ((page.x < 0) && (bounding_box.x >= 0))
    {
      page.width+=page.x-bounding_box.x;
      page.x=0;
    }
  else
    {
      page.width-=bounding_box.x-page.x;
      page.x-=bounding_box.x;
      if (page.x < 0)
        page.x=0;
    }
  if ((page.y < 0) && (bounding_box.y >= 0))
    {
      page.height+=page.y-bounding_box.y;
      page.y=0;
    }
  else
    {
      page.height-=bounding_box.y-page.y;
      page.y-=bounding_box.y;
      if (page.y < 0)
        page.y=0;
    }
  /*
    Clamp the rectangle to the actual pixel area and to the requested size.
  */
  if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns)
    page.width=image->columns-page.x;
  if ((geometry->width != 0) && (page.width > geometry->width))
    page.width=geometry->width;
  if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows)
    page.height=image->rows-page.y;
  if ((geometry->height != 0) && (page.height > geometry->height))
    page.height=geometry->height;
  bounding_box.x+=page.x;
  bounding_box.y+=page.y;
  if ((page.width == 0) || (page.height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      return((Image *) NULL);
    }
  /*
    Initialize crop image attributes.
  */
  crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
  if (crop_image == (Image *) NULL)
    return((Image *) NULL);
  crop_image->page.width=image->page.width;
  crop_image->page.height=image->page.height;
  offset.x=(ssize_t) (bounding_box.x+bounding_box.width);
  offset.y=(ssize_t) (bounding_box.y+bounding_box.height);
  if ((offset.x > (ssize_t) image->page.width) ||
      (offset.y > (ssize_t) image->page.height))
    {
      crop_image->page.width=bounding_box.width;
      crop_image->page.height=bounding_box.height;
    }
  crop_image->page.x=bounding_box.x;
  crop_image->page.y=bounding_box.y;
  /*
    Crop image: copy each row of the selected region into the new image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  crop_view=AcquireAuthenticCacheView(crop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,crop_image,crop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) crop_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
      1,exception);
    q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) crop_image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Copy every channel present in both images.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait crop_traits=GetPixelChannelTraits(crop_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (crop_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(crop_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(crop_image);
    }
    if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_CropImage)
#endif
        proceed=SetImageProgress(image,CropImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  crop_view=DestroyCacheView(crop_view);
  image_view=DestroyCacheView(image_view);
  crop_image->type=image->type;
  if (status == MagickFalse)
    crop_image=DestroyImage(crop_image);
  return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e T o T i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImageToTiles() crops a single image, into a possible list of tiles.
% This may include a single sub-region of the image. This basically applies
% all the normal geometry flags for Crop.
%
% The format of the CropImageToTiles method is:
%
%      Image *CropImageToTiles(const Image *image,
%        const char *crop_geometry,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport Image *CropImageToTiles(const Image *image,
  const char *crop_geometry,ExceptionInfo *exception)
{
  Image
    *next,
    *crop_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  /*
    Crop image into a list of tiles as directed by the crop geometry string;
    the geometry flags select one of the three modes below.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  crop_image=NewImageList();
  next=NewImageList();
  flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
  if ((flags & AreaValue) != 0)
    {
      PointInfo
        delta,
        offset;

      RectangleInfo
        crop;

      size_t
        height,
        width;

      /*
        Crop into NxM tiles (@ flag).
      */
      width=image->columns;
      height=image->rows;
      if (geometry.width == 0)
        geometry.width=1;
      if (geometry.height == 0)
        geometry.height=1;
      if ((flags & AspectValue) == 0)
        {
          width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      else
        {
          width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      /*
        delta is the (possibly fractional) tile size; never below one pixel.
      */
      delta.x=(double) width/geometry.width;
      delta.y=(double) height/geometry.height;
      if (delta.x < 1.0)
        delta.x=1.0;
      if (delta.y < 1.0)
        delta.y=1.0;
      for (offset.y=0; offset.y < (double) height; )
      {
        if ((flags & AspectValue) == 0)
          {
            crop.y=(ssize_t) MagickRound((double) (offset.y-
              (geometry.y > 0 ? 0 : geometry.y)));
            offset.y+=delta.y;   /* increment now to find width */
            crop.height=(size_t) MagickRound((double) (offset.y+
              (geometry.y < 0 ? 0 : geometry.y)));
          }
        else
          {
            crop.y=(ssize_t) MagickRound((double) (offset.y-
              (geometry.y > 0 ? geometry.y : 0)));
            offset.y+=delta.y;   /* increment now to find width */
            crop.height=(size_t) MagickRound((double)
              (offset.y+(geometry.y < -1 ? geometry.y : 0)));
          }
        crop.height-=crop.y;
        crop.y+=image->page.y;
        for (offset.x=0; offset.x < (double) width; )
        {
          if ((flags & AspectValue) == 0)
            {
              crop.x=(ssize_t) MagickRound((double) (offset.x-
                (geometry.x > 0 ? 0 : geometry.x)));
              offset.x+=delta.x;   /* increment now to find height */
              crop.width=(size_t) MagickRound((double) (offset.x+
                (geometry.x < 0 ? 0 : geometry.x)));
            }
          else
            {
              crop.x=(ssize_t) MagickRound((double) (offset.x-
                (geometry.x > 0 ? geometry.x : 0)));
              offset.x+=delta.x;   /* increment now to find height */
              crop.width=(size_t) MagickRound((double) (offset.x+
                (geometry.x < 0 ? geometry.x : 0)));
            }
          crop.width-=crop.x;
          crop.x+=image->page.x;
          next=CropImage(image,&crop,exception);
          if (next != (Image *) NULL)
            AppendImageToList(&crop_image,next);
        }
      }
      /*
        Per-tile warnings (e.g. empty edge tiles) are expected; clear them.
      */
      ClearMagickException(exception);
      return(crop_image);
    }
  if (((geometry.width == 0) && (geometry.height == 0)) ||
      ((flags & XValue) != 0) || ((flags & YValue) != 0))
    {
      /*
        Crop a single region at +X+Y.
      */
      crop_image=CropImage(image,&geometry,exception);
      if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
        {
          crop_image->page.width=geometry.width;
          crop_image->page.height=geometry.height;
          crop_image->page.x-=geometry.x;
          crop_image->page.y-=geometry.y;
        }
      return(crop_image);
    }
  if ((image->columns > geometry.width) || (image->rows > geometry.height))
    {
      RectangleInfo
        page;

      size_t
        height,
        width;

      ssize_t
        x,
        y;

      /*
        Crop into tiles of fixed size WxH.
      */
      page=image->page;
      if (page.width == 0)
        page.width=image->columns;
      if (page.height == 0)
        page.height=image->rows;
      width=geometry.width;
      if (width == 0)
        width=page.width;
      height=geometry.height;
      if (height == 0)
        height=page.height;
      next=NewImageList();
      for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
      {
        for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
        {
          geometry.width=width;
          geometry.height=height;
          geometry.x=x;
          geometry.y=y;
          next=CropImage(image,&geometry,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
      }
      return(crop_image);
    }
  /*
    Image is no larger than the requested tile: return a clone.
  */
  return(CloneImage(image,0,0,MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x c e r p t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExcerptImage() returns a excerpt of the image as defined by the geometry.
%
% The format of the ExcerptImage method is:
%
% Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag  "Excerpt/Image"

  CacheView
    *excerpt_view,
    *image_view;

  Image
    *excerpt_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate excerpt image: a geometry->width x geometry->height region of
    image starting at (geometry->x,geometry->y).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (excerpt_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Excerpt each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,excerpt_image,excerpt_image->rows,1)
#endif
  for (y=0; y < (ssize_t) excerpt_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
      geometry->width,1,exception);
    /*
      The destination pixels are written below without reading their prior
      contents, so queue them rather than fetching them (matches ChopImage()
      and CropImage() and avoids a needless pixel read).
    */
    q=QueueCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) excerpt_image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Copy every channel present in both source and destination.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait excerpt_traits=GetPixelChannelTraits(excerpt_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (excerpt_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(excerpt_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(excerpt_image);
    }
    if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ExcerptImage)
#endif
        proceed=SetImageProgress(image,ExcerptImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  excerpt_view=DestroyCacheView(excerpt_view);
  image_view=DestroyCacheView(image_view);
  excerpt_image->type=image->type;
  if (status == MagickFalse)
    excerpt_image=DestroyImage(excerpt_image);
  return(excerpt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x t e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExtentImage() extends the image as defined by the geometry, gravity, and
% image background color. Set the (x,y) offset of the geometry to move the
% original image relative to the extended image.
%
% The format of the ExtentImage method is:
%
% Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExtentImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
  Image
    *extent_image;

  /*
    Create a geometry->width x geometry->height canvas filled with the
    background color and composite the source image onto it, shifted by
    (-geometry->x,-geometry->y).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((geometry->x == 0) && (geometry->y == 0) &&
      (image->columns == geometry->width) && (image->rows == geometry->height))
    {
      /*
        Requested extent equals the image itself: nothing to do but clone.
      */
      return(CloneImage(image,0,0,MagickTrue,exception));
    }
  extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (extent_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageBackgroundColor(extent_image,exception);
  (void) CompositeImage(extent_image,image,image->compose,MagickTrue,
    -geometry->x,-geometry->y,exception);
  return(extent_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlipImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis.
%
% The format of the FlipImage method is:
%
% Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag "Flip/Image"
  CacheView
    *flip_view,
    *image_view;
  Image
    *flip_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  RectangleInfo
    page;
  ssize_t
    y;
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* The vertical mirror has identical dimensions, so begin with a clone. */
  flip_image=CloneImage(image,0,0,MagickTrue,exception);
  if (flip_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flip image: source row y becomes destination row (rows-y-1).
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,flip_image,flip_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flip_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    /* Destination scanline is the vertically mirrored row. */
    q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
      1),flip_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) flip_image->columns; x++)
    {
      register ssize_t
        i;
      /* Copy only channels that are defined in both images. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait flip_traits=GetPixelChannelTraits(flip_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (flip_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(flip_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(flip_image);
    }
    if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FlipImage)
#endif
        proceed=SetImageProgress(image,FlipImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flip_view=DestroyCacheView(flip_view);
  image_view=DestroyCacheView(image_view);
  flip_image->type=image->type;
  /* A flip reflects the page offset about the canvas height. */
  if (page.height != 0)
    page.y=(ssize_t) (page.height-flip_image->rows-page.y);
  flip_image->page=page;
  if (status == MagickFalse)
    flip_image=DestroyImage(flip_image);
  return(flip_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlopImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis.
%
% The format of the FlopImage method is:
%
% Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag "Flop/Image"
  CacheView
    *flop_view,
    *image_view;
  Image
    *flop_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  RectangleInfo
    page;
  ssize_t
    y;
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* The horizontal mirror has identical dimensions, so begin with a clone. */
  flop_image=CloneImage(image,0,0,MagickTrue,exception);
  if (flop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flop each row: pixels are written right-to-left into the same row.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,flop_image,flop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flop_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    register Quantum
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Start q one pixel past the end of the row and walk it backwards. */
    q+=GetPixelChannels(flop_image)*flop_image->columns;
    for (x=0; x < (ssize_t) flop_image->columns; x++)
    {
      register ssize_t
        i;
      q-=GetPixelChannels(flop_image);
      /* Copy only channels that are defined in both images. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait flop_traits=GetPixelChannelTraits(flop_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (flop_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(flop_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FlopImage)
#endif
        proceed=SetImageProgress(image,FlopImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flop_view=DestroyCacheView(flop_view);
  image_view=DestroyCacheView(image_view);
  flop_image->type=image->type;
  /* A flop reflects the page offset about the canvas width. */
  if (page.width != 0)
    page.x=(ssize_t) (page.width-flop_image->columns-page.x);
  flop_image->page=page;
  if (status == MagickFalse)
    flop_image=DestroyImage(flop_image);
  return(flop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RollImage() offsets an image as defined by x_offset and y_offset.
%
% The format of the RollImage method is:
%
% Image *RollImage(const Image *image,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x_offset: the number of columns to roll in the horizontal direction.
%
% o y_offset: the number of rows to roll in the vertical direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CopyImageRegion(Image *destination,const Image *source, const size_t columns,const size_t rows,const ssize_t sx,const ssize_t sy,
  const ssize_t dx,const ssize_t dy,ExceptionInfo *exception)
{
  /*
    Copy a columns-by-rows region of source at (sx,sy) into destination at
    (dx,dy), one scanline at a time.  Returns MagickFalse if any scanline
    cannot be read or written.  A columns == 0 request is a no-op; rows == 0
    is also effectively a no-op because the scanline loop never executes.
  */
  CacheView
    *source_view,
    *destination_view;
  MagickBooleanType
    status;
  ssize_t
    y;
  if (columns == 0)
    return(MagickTrue);
  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source,exception);
  destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,destination,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;
    register const Quantum
      *magick_restrict p;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    /*
      Transfer scanline.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
    q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;
      /* Copy only channels that are defined in both images. */
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait source_traits=GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((source_traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(destination,channel,p[i],q);
      }
      p+=GetPixelChannels(source);
      q+=GetPixelChannels(destination);
    }
    sync=SyncCacheViewAuthenticPixels(destination_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}
MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag "Roll/Image"
  Image
    *roll_image;
  MagickStatusType
    status;
  RectangleInfo
    offset;
  /*
    Initialize roll image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  roll_image=CloneImage(image,0,0,MagickTrue,exception);
  if (roll_image == (Image *) NULL)
    return((Image *) NULL);
  /* Normalize the offsets into [0,columns) and [0,rows). */
  offset.x=x_offset;
  offset.y=y_offset;
  while (offset.x < 0)
    offset.x+=(ssize_t) image->columns;
  while (offset.x >= (ssize_t) image->columns)
    offset.x-=(ssize_t) image->columns;
  while (offset.y < 0)
    offset.y+=(ssize_t) image->rows;
  while (offset.y >= (ssize_t) image->rows)
    offset.y-=(ssize_t) image->rows;
  /*
    Roll image: a circular shift is equivalent to swapping four quadrants.
    Each CopyImageRegion call moves one quadrant of the source into its
    wrapped-around position in the destination.
  */
  /* Bottom-right quadrant of source -> top-left of destination. */
  status=CopyImageRegion(roll_image,image,(size_t) offset.x,
    (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
    offset.y,0,0,exception);
  (void) SetImageProgress(image,RollImageTag,0,3);
  /* Bottom-left quadrant -> top-right. */
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,
    (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
    exception);
  (void) SetImageProgress(image,RollImageTag,1,3);
  /* Top-right quadrant -> bottom-left. */
  status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
    offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,2,3);
  /* Top-left quadrant -> bottom-right. */
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
    offset.y,0,0,offset.x,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,3,3);
  roll_image->type=image->type;
  if (status == MagickFalse)
    roll_image=DestroyImage(roll_image);
  return(roll_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShaveImage() shaves pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the ShaveImage method is:
%
% Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o shave_image: Method ShaveImage returns a pointer to the shaved
% image. A null image is returned if there is a memory shortage or
% if the image width or height is zero.
%
% o image: the image.
%
% o shave_info: Specifies a pointer to a RectangleInfo which defines the
% region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShaveImage(const Image *image,
  const RectangleInfo *shave_info,ExceptionInfo *exception)
{
  Image
    *shave_image;
  RectangleInfo
    geometry;
  /*
    Shave shave_info->width columns from each side and shave_info->height
    rows from top and bottom by delegating to CropImage, then adjust the
    virtual canvas (page) to match.  Validate arguments the same way every
    sibling transform in this module does (ExtentImage, SpliceImage, ...);
    the shave_info/exception asserts were previously missing here.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(shave_info != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* The shave must leave at least one pixel in each dimension. */
  if (((2*shave_info->width) >= image->columns) ||
      ((2*shave_info->height) >= image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  SetGeometry(image,&geometry);
  geometry.width-=2*shave_info->width;
  geometry.height-=2*shave_info->height;
  geometry.x=(ssize_t) shave_info->width+image->page.x;
  geometry.y=(ssize_t) shave_info->height+image->page.y;
  shave_image=CropImage(image,&geometry,exception);
  if (shave_image == (Image *) NULL)
    return((Image *) NULL);
  /* Shrink the virtual canvas by the shaved margins. */
  shave_image->page.width-=2*shave_info->width;
  shave_image->page.height-=2*shave_info->height;
  shave_image->page.x-=(ssize_t) shave_info->width;
  shave_image->page.y-=(ssize_t) shave_info->height;
  return(shave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p l i c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpliceImage() splices a solid color into the image as defined by the
% geometry.
%
% The format of the SpliceImage method is:
%
% Image *SpliceImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to splice with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpliceImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define SpliceImageTag "Splice/Image"
  CacheView
    *image_view,
    *splice_view;
  Image
    *splice_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  RectangleInfo
    splice_geometry;
  ssize_t
    columns,
    y;
  /*
    Allocate splice image: the canvas grows by the splice width and height.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  splice_geometry=(*geometry);
  splice_image=CloneImage(image,image->columns+splice_geometry.width,
    image->rows+splice_geometry.height,MagickTrue,exception);
  if (splice_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(splice_image,DirectClass,exception) == MagickFalse)
    {
      splice_image=DestroyImage(splice_image);
      return((Image *) NULL);
    }
  /* A non-gray background in a gray colorspace forces an sRGB promotion. */
  if ((IsPixelInfoGray(&splice_image->background_color) == MagickFalse) &&
      (IsGrayColorspace(splice_image->colorspace) != MagickFalse))
    (void) SetImageColorspace(splice_image,sRGBColorspace,exception);
  if ((splice_image->background_color.alpha_trait != UndefinedPixelTrait) &&
      (splice_image->alpha_trait == UndefinedPixelTrait))
    (void) SetImageAlpha(splice_image,OpaqueAlpha,exception);
  (void) SetImageBackgroundColor(splice_image,exception);
  /*
    Respect image geometry: shift the splice origin by the image gravity.
  */
  switch (image->gravity)
  {
    default:
    case UndefinedGravity:
    case NorthWestGravity:
      break;
    case NorthGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      break;
    }
    case NorthEastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      break;
    }
    case WestGravity:
    {
      /*
        West gravity centers the splice vertically; the offset is half the
        splice height (this previously used width/2, the horizontal extent).
      */
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case CenterGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case EastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case SouthWestGravity:
    {
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
    case SouthGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
    case SouthEastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
  }
  /*
    Splice image.  Two passes: rows above the splice band, then rows below
    it.  Within a row, pixels left of the splice column are copied, the
    spliced columns are skipped (left as background), and the remaining
    source pixels are copied to the right of the band.
  */
  status=MagickTrue;
  progress=0;
  columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  splice_view=AcquireAuthenticCacheView(splice_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,splice_image,splice_geometry.y,1)
#endif
  for (y=0; y < (ssize_t) splice_geometry.y; y++)
  {
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    register Quantum
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < columns; x++)
    {
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (splice_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(splice_image,channel,p[i],q);
      }
      SetPixelRed(splice_image,GetPixelRed(image,p),q);
      SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
      SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
      SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(splice_image);
    }
    /* Skip the spliced columns: advance q only, leaving the background. */
    for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
      q+=GetPixelChannels(splice_image);
    for ( ; x < (ssize_t) splice_image->columns; x++)
    {
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (splice_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(splice_image,channel,p[i],q);
      }
      SetPixelRed(splice_image,GetPixelRed(image,p),q);
      SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
      SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
      SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(splice_image);
    }
    if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* Name the critical section after this function; it previously
           reused MagickCore_TransposeImage and shared that lock. */
        #pragma omp critical (MagickCore_SpliceImage)
#endif
        proceed=SetImageProgress(image,SpliceImageTag,progress++,
          splice_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,splice_image,splice_image->rows,2)
#endif
  for (y=(ssize_t) (splice_geometry.y+splice_geometry.height);
       y < (ssize_t) splice_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    register Quantum
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    if ((y < 0) || (y >= (ssize_t)splice_image->rows))
      continue;
    /* Source rows below the band are offset by the splice height. */
    p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height,
      splice_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < columns; x++)
    {
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (splice_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(splice_image,channel,p[i],q);
      }
      SetPixelRed(splice_image,GetPixelRed(image,p),q);
      SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
      SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
      SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(splice_image);
    }
    /* Skip the spliced columns: advance q only, leaving the background. */
    for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
      q+=GetPixelChannels(splice_image);
    for ( ; x < (ssize_t) splice_image->columns; x++)
    {
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (splice_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(splice_image,channel,p[i],q);
      }
      SetPixelRed(splice_image,GetPixelRed(image,p),q);
      SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
      SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
      SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(splice_image);
    }
    if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SpliceImage)
#endif
        proceed=SetImageProgress(image,SpliceImageTag,progress++,
          splice_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  splice_view=DestroyCacheView(splice_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    splice_image=DestroyImage(splice_image);
  return(splice_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImage() is a convenience method that behaves like ResizeImage() or
% CropImage() but accepts scaling and/or cropping information as a region
% geometry specification. If the operation fails, the original image handle
% is left as is.
%
% This should only be used for single images.
%
% This function destroys what it assumes to be a single image list.
% If the input image is part of a larger list, all other images in that list
% will be simply 'lost', not destroyed.
%
% Also if the crop generates a list of images only the first image is resized.
% And finally if the crop succeeds and the resize failed, you will get a
% cropped image, as well as a 'false' or 'failed' report.
%
% This function should probably be deprecated in favor of direct calls
% to CropImageToTiles() or ResizeImage(), as appropriate.
%
% The format of the TransformImage method is:
%
% MagickBooleanType TransformImage(Image **image,const char *crop_geometry,
% const char *image_geometry,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType TransformImage(Image **image,
  const char *crop_geometry,const char *image_geometry,ExceptionInfo *exception)
{
  Image
    *resize_image,
    *transform_image;
  RectangleInfo
    geometry;
  /*
    Crop and/or resize *image per the geometry strings, replacing the handle
    in place.  Returns MagickFalse on failure, in which case *image is left
    pointing at a valid image.
  */
  assert(image != (Image **) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  transform_image=(*image);
  if (crop_geometry != (const char *) NULL)
    {
      Image
        *crop_image;
      /*
        Crop image to a user specified size.
      */
      crop_image=CropImageToTiles(*image,crop_geometry,exception);
      if (crop_image == (Image *) NULL)
        {
          /*
            The crop failed; fall back to a clone of the original.  If the
            clone also fails, leave *image untouched and report failure
            rather than storing a NULL handle and dereferencing it below.
          */
          transform_image=CloneImage(*image,0,0,MagickTrue,exception);
          if (transform_image == (Image *) NULL)
            return(MagickFalse);
        }
      else
        {
          transform_image=DestroyImage(transform_image);
          transform_image=GetFirstImageInList(crop_image);
        }
      *image=transform_image;
    }
  if (image_geometry == (const char *) NULL)
    return(MagickTrue);
  /*
    Scale image to a user specified size.
  */
  (void) ParseRegionGeometry(transform_image,image_geometry,&geometry,
    exception);
  if ((transform_image->columns == geometry.width) &&
      (transform_image->rows == geometry.height))
    return(MagickTrue);
  resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
    transform_image->filter,exception);
  if (resize_image == (Image *) NULL)
    return(MagickFalse);
  transform_image=DestroyImage(transform_image);
  transform_image=resize_image;
  *image=transform_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p o s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransposeImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis while rotating them by 90 degrees.
%
% The format of the TransposeImage method is:
%
% Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag "Transpose/Image"
  CacheView
    *image_view,
    *transpose_view;
  Image
    *transpose_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  RectangleInfo
    page;
  ssize_t
    y;
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* The transpose swaps dimensions: rows x columns becomes columns x rows. */
  transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transpose_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transpose image: each source row is written as a column of the result.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transpose_view=AcquireAuthenticCacheView(transpose_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,transpose_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    /* Read source rows bottom-up; write each as destination column
       (rows-y-1), a 1-pixel-wide region of height transpose_image->rows. */
    p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
      0,1,transpose_image->rows,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;
      /* Copy only channels that are defined in both images. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait transpose_traits=GetPixelChannelTraits(transpose_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (transpose_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(transpose_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(transpose_image);
    }
    if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransposeImage)
#endif
        proceed=SetImageProgress(image,TransposeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transpose_view=DestroyCacheView(transpose_view);
  image_view=DestroyCacheView(image_view);
  transpose_image->type=image->type;
  /* The page geometry transposes along with the pixels. */
  page=transpose_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  transpose_image->page=page;
  if (status == MagickFalse)
    transpose_image=DestroyImage(transpose_image);
  return(transpose_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s v e r s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransverseImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis while rotating them by 270 degrees.
%
% The format of the TransverseImage method is:
%
% Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag "Transverse/Image"
  CacheView
    *image_view,
    *transverse_view;
  Image
    *transverse_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  RectangleInfo
    page;
  ssize_t
    y;
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* The transverse swaps dimensions: rows x columns becomes columns x rows. */
  transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transverse_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transverse image: each source row is written, reversed, as a column.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,transverse_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;
    register const Quantum
      *magick_restrict p;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    /* Read source row y; write destination column (rows-y-1). */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-1),
      0,1,transverse_image->rows,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Start q one pixel past the end of the column and walk it backwards,
       reversing the row as it becomes a column. */
    q+=GetPixelChannels(transverse_image)*image->columns;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;
      q-=GetPixelChannels(transverse_image);
      /* Copy only channels that are defined in both images. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait transverse_traits=GetPixelChannelTraits(transverse_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (transverse_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(transverse_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
    }
    sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransverseImage)
#endif
        proceed=SetImageProgress(image,TransverseImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transverse_view=DestroyCacheView(transverse_view);
  image_view=DestroyCacheView(image_view);
  transverse_image->type=image->type;
  /* The page geometry transposes and then reflects in both axes. */
  page=transverse_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  if (page.width != 0)
    page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
  if (page.height != 0)
    page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
  transverse_image->page=page;
  if (status == MagickFalse)
    transverse_image=DestroyImage(transverse_image);
  return(transverse_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r i m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TrimImage() trims pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the TrimImage method is:
%
% Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    bounds;

  /*
    Trim background pixels from the image edges: compute the bounding box of
    the non-background region and crop to it (offset by the existing page
    geometry).  A completely blank image degenerates to a 1x1 transparent
    canvas with a (-1,-1) page offset.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  bounds=GetImageBoundingBox(image,exception);
  if ((bounds.width != 0) && (bounds.height != 0))
    {
      /*
        Non-empty bounding box: crop relative to the page origin.
      */
      bounds.x+=image->page.x;
      bounds.y+=image->page.y;
      return(CropImage(image,&bounds,exception));
    }
  /*
    Nothing but background: hand back a 1x1 fully transparent image.
  */
  {
    Image
      *trim_image;

    trim_image=CloneImage(image,1,1,MagickTrue,exception);
    if (trim_image == (Image *) NULL)
      return((Image *) NULL);
    trim_image->background_color.alpha=(MagickRealType) TransparentAlpha;
    trim_image->alpha_trait=BlendPixelTrait;
    (void) SetImageBackgroundColor(trim_image,exception);
    trim_image->page=image->page;
    trim_image->page.x=(-1);
    trim_image->page.y=(-1);
    return(trim_image);
  }
}
|
convolution_5x5.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
// conv5x5s1_neon: 5x5 convolution, stride 1, float32, "valid" padding
// (input row width w = outw + 4).  For each output channel p the plane is
// initialised with the bias, then every input channel q is accumulated.
// Output rows are produced two at a time so the five input rows shared by a
// pair of output rows are loaded only once; a scalar loop handles the
// rightmost columns and (for odd outh) the final row.
static void conv5x5s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// Mat converts implicitly to const float*; kernel holds outch*inch*25 weights,
// bias holds outch values (or is empty, in which case bias == NULL).
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q=0; q<inch; q++)
{
// outptr / outptr2: the pair of output rows written per iteration of i.
float* outptr = out;
float* outptr2 = outptr + outw;
const float* img0 = bottom_blob.channel(q);
// 5x5 kernel for output channel p, input channel q.
const float* kernel0 = kernel + p*inch*25 + q*25;
// r0..r5: six consecutive input rows feeding the two output rows.
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
const float* r4 = img0 + w*4;
const float* r5 = img0 + w*5;
// k0..k4: the five kernel rows (used by the scalar tail).
const float* k0 = kernel0;
const float* k1 = kernel0 + 5;
const float* k2 = kernel0 + 10;
const float* k3 = kernel0 + 15;
const float* k4 = kernel0 + 20;
#if __ARM_NEON
// The 25 weights preloaded as overlapping 4-lane vectors; names give the
// flat kernel indices held in each vector.  _k24242424 broadcasts w[24].
float32x4_t _k0123 = vld1q_f32(kernel0);
float32x4_t _k4567 = vld1q_f32(kernel0+4);
float32x4_t _k891011 = vld1q_f32(kernel0+8);
float32x4_t _k12131415 = vld1q_f32(kernel0+12);
float32x4_t _k16171819 = vld1q_f32(kernel0+16);
float32x4_t _k20212223 = vld1q_f32(kernel0+20);
float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);
#endif // __ARM_NEON
int i = 0;
// Main loop: two output rows per iteration.
for (; i+1 < outh; i+=2)
{
#if __ARM_NEON
// nn = number of 4-wide vector column groups; remain = scalar leftovers.
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
// Hand-scheduled A64 kernel: rows r1..r4 contribute to both output rows,
// r0 only to the first, r5 only to the second.
if (nn > 0)
{
asm volatile(
// v11 = rx1 / rx3
// v12 = rx2
// v13 v14 = intermediate sum register
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v7.4s}, [%1] \n"// v7 = out
"0: \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v8.4s}, [%2] \n"// v8 = out2
// r1
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v9.4s, v10.4s}, [%4] \n"// v9 v10 = r10 r14
"add %4, %4, #16 \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n" //r11
"fmul v13.4s, v9.4s, %19.s[1] \n"
"fmla v8.4s, v9.4s, %18.s[0] \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n" //r12
"fmla v7.4s, v11.4s, %19.s[2] \n"
"fmul v14.4s, v11.4s, %18.s[1] \n"
"ext v11.16b, v9.16b, v10.16b, #12 \n" //r13
"fmla v13.4s, v12.4s, %19.s[3] \n"
"fmla v8.4s, v12.4s, %18.s[2] \n"
"fmla v7.4s, v11.4s, %20.s[0] \n"
"fmla v14.4s, v11.4s, %18.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"fmla v13.4s, v10.4s, %20.s[1] \n"
"fmla v8.4s, v10.4s, %19.s[0] \n"
// r2
"ld1 {v9.4s, v10.4s}, [%5] \n"// v9 v10 = r20 r24
"add %5, %5, #16 \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n" //r21
"fmla v7.4s, v9.4s, %20.s[2] \n"
"fmla v14.4s, v9.4s, %19.s[1] \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n" //r22
"fmla v13.4s, v11.4s, %20.s[3] \n"
"fmla v8.4s, v11.4s, %19.s[2] \n"
"ext v11.16b, v9.16b, v10.16b, #12 \n" //r23
"fmla v7.4s, v12.4s, %21.s[0] \n"
"fmla v14.4s, v12.4s, %19.s[3] \n"
"fmla v13.4s, v11.4s, %21.s[1] \n"
"fmla v8.4s, v11.4s, %20.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"fmla v7.4s, v10.4s, %21.s[2] \n"
"fmla v14.4s, v10.4s, %20.s[1] \n"
// r3
"ld1 {v9.4s, v10.4s}, [%6] \n"// v9 v10 = r30 r34
"add %6, %6, #16 \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n" //r31
"fmla v13.4s, v9.4s, %21.s[3] \n"
"fmla v8.4s, v9.4s, %20.s[2] \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n" //r32
"fmla v7.4s, v11.4s, %22.s[0] \n"
"fmla v14.4s, v11.4s, %20.s[3] \n"
"ext v11.16b, v9.16b, v10.16b, #12 \n" //r33
"fmla v13.4s, v12.4s, %22.s[1] \n"
"fmla v8.4s, v12.4s, %21.s[0] \n"
"fmla v7.4s, v11.4s, %22.s[2] \n"
"fmla v14.4s, v11.4s, %21.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"fmla v13.4s, v10.4s, %22.s[3] \n"
"fmla v8.4s, v10.4s, %21.s[2] \n"
// r4
"ld1 {v9.4s, v10.4s}, [%7] \n"// v9 v10 = r40 r44
"add %7, %7, #16 \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n" //r41
"fmla v7.4s, v9.4s, %23.s[0] \n"
"fmla v14.4s, v9.4s, %21.s[3] \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n" //r42
"fmla v13.4s, v11.4s, %23.s[1] \n"
"fmla v8.4s, v11.4s, %22.s[0] \n"
"ext v11.16b, v9.16b, v10.16b, #12 \n" //r43
"fmla v7.4s, v12.4s, %23.s[2] \n"
"fmla v14.4s, v12.4s, %22.s[1] \n"
"fmla v13.4s, v11.4s, %23.s[3] \n"
"fmla v8.4s, v11.4s, %22.s[2] \n"
"prfm pldl1keep, [%3, #256] \n"
"fmla v7.4s, v10.4s, %24.s[0] \n"
"fmla v14.4s, v10.4s, %22.s[3] \n"
// r0 and r5
"ld1 {v9.4s, v10.4s}, [%3] \n"// v9 v10 = r00 r04
"add %3, %3, #16 \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n" //r01
"fmla v13.4s, v11.4s, %18.s[1] \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n" //r02
"fmla v7.4s, v12.4s, %18.s[2] \n"
"ext v11.16b, v9.16b, v10.16b, #12 \n" //r03
"prfm pldl1keep, [%8, #256] \n"
"fmla v13.4s, v11.4s, %18.s[3] \n"
// r5
"ld1 {v11.4s, v12.4s}, [%8] \n"// v11 v12 = r50 r54
"add %8, %8, #16 \n"
"fmla v8.4s, v11.4s, %23.s[0] \n"
"fmla v14.4s, v12.4s, %24.s[0] \n"
"fmla v7.4s, v9.4s, %18.s[0] \n"
"fmla v13.4s, v10.4s, %19.s[0] \n"
"ext v9.16b, v11.16b, v12.16b, #4 \n" //r51
"ext v10.16b, v11.16b, v12.16b, #8 \n" //r52
"fmla v14.4s, v9.4s, %23.s[1] \n"
"ext v9.16b, v11.16b, v12.16b, #12 \n" //r53
"fmla v8.4s, v10.4s, %23.s[2] \n"
"fmla v14.4s, v9.4s, %23.s[3] \n"
"fadd v7.4s, v7.4s, v13.4s \n"
"st1 {v7.4s}, [%1], #16 \n"
"fadd v8.4s, v8.4s, v14.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v7.4s}, [%1] \n"// v7 = out
"st1 {v8.4s}, [%2], #16 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(outptr2), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3), // %6
"=r"(r4), // %7
"=r"(r5) // %8
: "0"(nn),
"1"(outptr),
"2"(outptr2),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"7"(r4),
"8"(r5),
"w"(_k0123), // %18
"w"(_k4567), // %19
"w"(_k891011), // %20
"w"(_k12131415), // %21
"w"(_k16171819), // %22
"w"(_k20212223), // %23
"w"(_k24242424) // %24
: "cc", "memory", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else
// ARMv7 NEON variant of the same two-row kernel (q registers, vmla).
if (nn > 0)
{
asm volatile(
// "veor q13, q13 \n"
// "veor q14, q14 \n"
"pld [%1, #128] \n"
"vld1.f32 {d14-d15}, [%1] \n"// q7 = out
"0: \n"
// q11 = rx1 / rx3
// q12 = rx2
// q13 q14 = intermediate sum register
"pld [%2, #128] \n"
"vld1.f32 {d16-d17}, [%2] \n"// q8 = out2
"pld [%4, #256] \n"
// r1
"vld1.f32 {d18-d21}, [%4] \n"// q9 q10 = r10 r14
"add %4, #16 \n"
"vext.32 q11, q9, q10, #1 \n"// r11
"vmul.f32 q13, q9, %e19[1] \n"
"vmla.f32 q8, q9, %e18[0] \n"
"vext.32 q12, q9, q10, #2 \n"// r12
"vmla.f32 q7, q11, %f19[0] \n"
"vmul.f32 q14, q11, %e18[1] \n"
"vext.32 q11, q9, q10, #3 \n"// r13
"vmla.f32 q13, q12, %f19[1] \n"
"vmla.f32 q8, q12, %f18[0] \n"
"vmla.f32 q7, q11, %e20[0] \n"
"vmla.f32 q14, q11, %f18[1] \n"
"pld [%5, #256] \n"
"vmla.f32 q13, q10, %e20[1] \n"
"vmla.f32 q8, q10, %e19[0] \n"
// r2
"vld1.f32 {d18-d21}, [%5] \n"// q9 q10 = r20 r24
"add %5, #16 \n"
"vext.32 q11, q9, q10, #1 \n"// r21
"vmla.f32 q7, q9, %f20[0] \n"
"vmla.f32 q14, q9, %e19[1] \n"
"vext.32 q12, q9, q10, #2 \n"// r22
"vmla.f32 q13, q11, %f20[1] \n"
"vmla.f32 q8, q11, %f19[0] \n"
"vext.32 q11, q9, q10, #3 \n"// r23
"vmla.f32 q7, q12, %e21[0] \n"
"vmla.f32 q14, q12, %f19[1] \n"
"vmla.f32 q13, q11, %e21[1] \n"
"vmla.f32 q8, q11, %e20[0] \n"
"pld [%6, #256] \n"
"vmla.f32 q7, q10, %f21[0] \n"
"vmla.f32 q14, q10, %e20[1] \n"
// r3
"vld1.f32 {d18-d21}, [%6] \n"// q9 q10 = r30 r34
"add %6, #16 \n"
"vext.32 q11, q9, q10, #1 \n"// r31
"vmla.f32 q13, q9, %f21[1] \n"
"vmla.f32 q8, q9, %f20[0] \n"
"vext.32 q12, q9, q10, #2 \n"// r32
"vmla.f32 q7, q11, %e22[0] \n"
"vmla.f32 q14, q11, %f20[1] \n"
"vext.32 q11, q9, q10, #3 \n"// r33
"vmla.f32 q13, q12, %e22[1] \n"
"vmla.f32 q8, q12, %e21[0] \n"
"vmla.f32 q7, q11, %f22[0] \n"
"vmla.f32 q14, q11, %e21[1] \n"
"pld [%7, #256] \n"
"vmla.f32 q13, q10, %f22[1] \n"
"vmla.f32 q8, q10, %f21[0] \n"
// r4
"vld1.f32 {d18-d21}, [%7] \n"// q9 q10 = r40 r44
"add %7, #16 \n"
"vext.32 q11, q9, q10, #1 \n"// r41
"vmla.f32 q7, q9, %e23[0] \n"
"vmla.f32 q14, q9, %f21[1] \n"
"vext.32 q12, q9, q10, #2 \n"// r42
"vmla.f32 q13, q11, %e23[1] \n"
"vmla.f32 q8, q11, %e22[0] \n"
"vext.32 q11, q9, q10, #3 \n"// r43
"vmla.f32 q7, q12, %f23[0] \n"
"vmla.f32 q14, q12, %e22[1] \n"
"vmla.f32 q13, q11, %f23[1] \n"
"vmla.f32 q8, q11, %f22[0] \n"
"pld [%3, #256] \n"
"vmla.f32 q7, q10, %e24[0] \n"
"vmla.f32 q14, q10, %f22[1] \n"
// r0 and r5
"vld1.f32 {d18-d21}, [%3] \n"// q9 q10 = r00 r04
"add %3, #16 \n"
"vext.32 q11, q9, q10, #1 \n"// r01
"vmla.f32 q13, q11, %e18[1] \n"
"vext.32 q12, q9, q10, #2 \n"// r02
"vmla.f32 q7, q12, %f18[0] \n"
"vext.32 q11, q9, q10, #3 \n"// r03
"pld [%8, #256] \n"
"vmla.f32 q13, q11, %f18[1] \n"
// r5
"vld1.f32 {d22-d25}, [%8] \n"// q11 q12 = r50 r54
"add %8, #16 \n"
"vmla.f32 q8, q11, %e23[0] \n"
"vmla.f32 q14, q12, %e24[0] \n"
"vmla.f32 q7, q9, %e18[0] \n"
"vmla.f32 q13, q10, %e19[0] \n"
"vext.32 q9, q11, q12, #1 \n"// r51
"vext.32 q10, q11, q12, #2 \n"// r52
"vmla.f32 q14, q9, %e23[1] \n"
"vext.32 q9, q11, q12, #3 \n"// r53
"vmla.f32 q8, q10, %f23[0] \n"
"vmla.f32 q14, q9, %f23[1] \n"
"vadd.f32 q7, q7, q13 \n"
// "veor q13, q13 \n"
"vst1.f32 {d14-d15}, [%1]! \n"
"vadd.f32 q8, q8, q14 \n"
"pld [%1, #128] \n"
"vld1.f32 {d14-d15}, [%1] \n"// q7 = out
// "veor q14, q14 \n"
"vst1.f32 {d16-d17}, [%2]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(outptr2), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3), // %6
"=r"(r4), // %7
"=r"(r5) // %8
: "0"(nn),
"1"(outptr),
"2"(outptr2),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"7"(r4),
"8"(r5),
"w"(_k0123), // %18
"w"(_k4567), // %19
"w"(_k891011), // %20
"w"(_k12131415), // %21
"w"(_k16171819), // %22
"w"(_k20212223), // %23
"w"(_k24242424) // %24
: "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// Scalar tail: one output column for each of the two rows per iteration.
for (; remain>0; remain--)
{
float sum = 0;
float sum2 = 0;
#if __ARM_NEON
// Columns 0..3 of each kernel row are done as 4-lane dot products; the
// 5th column of rows 0..3 is gathered into _r_t4/_k_t4 and the single
// remaining product (row 4, col 4) is assigned (not added) into sum/sum2
// below, which is why the "sum =" lines overwrite the initial 0.
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _k1 = vld1q_f32(k1);
float32x4_t _sum = vmulq_f32(_r1, _k1);
float32x4_t _sum2 = vmulq_f32(_r1, _k0123);
float32x4_t _r2 = vld1q_f32(r2);
float32x4_t _k2 = vld1q_f32(k2);
_sum = vmlaq_f32(_sum, _r2, _k2);
_sum2 = vmlaq_f32(_sum2, _r2, _k1);
float32x4_t _r3 = vld1q_f32(r3);
float32x4_t _k3 = vld1q_f32(k3);
_sum = vmlaq_f32(_sum, _r3, _k3);
_sum2 = vmlaq_f32(_sum2, _r3, _k2);
float32x4_t _r4 = vld1q_f32(r4);
_sum = vmlaq_f32(_sum, _r4, _k20212223);
_sum2 = vmlaq_f32(_sum2, _r4, _k3);
float32x4_t _r0 = vld1q_f32(r0);
_sum = vmlaq_f32(_sum, _r0, _k0123);
float32x4_t _r5 = vld1q_f32(r5);
_sum2 = vmlaq_f32(_sum2, _r5, _k20212223);
// NOTE(review): _k_t4/_r_t4 start indeterminate; all four lanes are set
// before use, but an explicit vdupq_n_f32(0.f) init would silence
// -Wuninitialized — confirm against project convention.
float32x4_t _k_t4;
_k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
_k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
_k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
_k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);
float32x4_t _r_t4;
_r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
_r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
_r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
_r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);
_sum = vmlaq_f32(_sum, _r_t4, _k_t4);
sum = r4[4] * k4[4];
// Shift the column-4 gather up one row (r1..r4 + r4->r5 below) for sum2.
_r_t4 = vextq_f32(_r_t4, _r_t4, 1);
_r_t4 = vsetq_lane_f32(r4[4], _r_t4, 3);
_sum2 = vmlaq_f32(_sum2, _r_t4, _k_t4);
sum2 = r5[4] * k4[4];
// Horizontal reduction of both accumulators in one vpadd.
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2));
float32x2_t _ss_ss2 = vpadd_f32(_ss, _ss2);
sum += vget_lane_f32(_ss_ss2, 0);
sum2 += vget_lane_f32(_ss_ss2, 1);
#else
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r1[3] * k0[3];
sum2 += r1[4] * k0[4];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r2[3] * k1[3];
sum2 += r2[4] * k1[4];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
sum2 += r3[3] * k2[3];
sum2 += r3[4] * k2[4];
sum2 += r4[0] * k3[0];
sum2 += r4[1] * k3[1];
sum2 += r4[2] * k3[2];
sum2 += r4[3] * k3[3];
sum2 += r4[4] * k3[4];
sum2 += r5[0] * k4[0];
sum2 += r5[1] * k4[1];
sum2 += r5[2] * k4[2];
sum2 += r5[3] * k4[3];
sum2 += r5[4] * k4[4];
#endif // __ARM_NEON
*outptr += sum;
*outptr2 += sum2;
r0++;
r1++;
r2++;
r3++;
r4++;
r5++;
outptr++;
outptr2++;
}
// Advance input rows: +4 skips the kernel overhang at the row end
// (w == outw + 4), +w skips one extra row since two output rows were
// consumed this iteration.
r0 += 4 + w;
r1 += 4 + w;
r2 += 4 + w;
r3 += 4 + w;
r4 += 4 + w;
r5 += 4 + w;
// Skip over the second output row already written via outptr2.
outptr += outw;
outptr2 += outw;
}
// Leftover single output row (odd outh): same scheme, one row at a time.
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
// A64 single-row kernel: four partial accumulators (v7, v13..v15) reduced
// at the end of each column group.
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%1, #128] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v8.4s, v9.4s}, [%2] \n"// _r00 = vld1q_f32(r0+j);
"add %2, %2, #16 \n"
"0: \n"
"ld1 {v7.4s}, [%1] \n"// _sum = vld1q_f32(outptr+j);
"ext v10.16b, v8.16b, v9.16b, #4 \n" //_r01
"ext v11.16b, v8.16b, v9.16b, #8 \n" //_r02
"ext v12.16b, v8.16b, v9.16b, #12 \n" //_r03
"fmla v7.4s, v8.4s, %14.s[0] \n"
"fmul v13.4s, v10.4s, %14.s[1] \n"
"prfm pldl1keep, [%3, #256] \n"
"fmul v14.4s, v11.4s, %14.s[2] \n"
"fmul v15.4s, v12.4s, %14.s[3] \n"
"fmla v7.4s, v9.4s, %15.s[0] \n"
"ld1 {v8.4s, v9.4s}, [%3] \n"
"add %3, %3, #16 \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n" //_r11
"ext v11.16b, v8.16b, v9.16b, #8 \n" //_r12
"ext v12.16b, v8.16b, v9.16b, #12 \n" //_r13
"fmla v7.4s, v8.4s, %15.s[1] \n"
"fmla v13.4s, v10.4s, %15.s[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"fmla v14.4s, v11.4s, %15.s[3] \n"
"fmla v15.4s, v12.4s, %16.s[0] \n"
"fmla v7.4s, v9.4s, %16.s[1] \n"
"ld1 {v8.4s, v9.4s}, [%4] \n"
"add %4, %4, #16 \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n" //_r21
"ext v11.16b, v8.16b, v9.16b, #8 \n" //_r22
"ext v12.16b, v8.16b, v9.16b, #12 \n" //_r23
"fmla v7.4s, v8.4s, %16.s[2] \n"
"fmla v13.4s, v10.4s, %16.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"fmla v14.4s, v11.4s, %17.s[0] \n"
"fmla v15.4s, v12.4s, %17.s[1] \n"
"fmla v7.4s, v9.4s, %17.s[2] \n"
"ld1 {v8.4s, v9.4s}, [%5] \n"
"add %5, %5, #16 \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n" //_r31
"ext v11.16b, v8.16b, v9.16b, #8 \n" //_r32
"ext v12.16b, v8.16b, v9.16b, #12 \n" //_r33
"fmla v7.4s, v8.4s, %17.s[3] \n"
"fmla v13.4s, v10.4s, %18.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"fmla v14.4s, v11.4s, %18.s[1] \n"
"fmla v15.4s, v12.4s, %18.s[2] \n"
"fmla v7.4s, v9.4s, %18.s[3] \n"
"ld1 {v8.4s, v9.4s}, [%6] \n"
"add %6, %6, #16 \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n" //_r41
"ext v11.16b, v8.16b, v9.16b, #8 \n" //_r42
"ext v12.16b, v8.16b, v9.16b, #12 \n" //_r43
"fmla v7.4s, v8.4s, %19.s[0] \n"
"fmla v13.4s, v10.4s, %19.s[1] \n"
"fmla v14.4s, v11.4s, %19.s[2] \n"
"fmla v15.4s, v12.4s, %19.s[3] \n"
"fmla v7.4s, v9.4s, %20.s[0] \n"
"fadd v14.4s, v14.4s, v15.4s \n"
"fadd v7.4s, v7.4s, v13.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"fadd v7.4s, v7.4s, v14.4s \n"
"ld1 {v8.4s, v9.4s}, [%2] \n"
"add %2, %2, #16 \n"
"st1 {v7.4s}, [%1], #16 \n"
"prfm pldl1keep, [%1, #128] \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
"sub %2, %2, #16 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424) // %20
: "cc", "memory", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else
// ARMv7 single-row kernel.
if (nn > 0)
{
asm volatile(
// "veor q15, q15 \n"// _sum3 = 0;
"pld [%1, #128] \n"
"pld [%2, #256] \n"
"vld1.f32 {d16-d19}, [%2] \n"// _r00 = vld1q_f32(r0+j);
"add %2, #16 \n"
"0: \n"
"vld1.f32 {d14-d15}, [%1] \n"// _sum = vld1q_f32(outptr+j);
// "veor q13, q13 \n"// _sum2 = 0;
// "veor q14, q14 \n"// _sum3 = 0;
"vext.32 q10, q8, q9, #1 \n"// _r01
"vext.32 q11, q8, q9, #2 \n"// _r02
"vext.32 q12, q8, q9, #3 \n"// _r03
"vmla.f32 q7, q8, %e14[0] \n"
"vmul.f32 q13, q10, %e14[1] \n"
"pld [%3, #256] \n"
"vmul.f32 q14, q11, %f14[0] \n"
"vmul.f32 q15, q12, %f14[1] \n"
"vmla.f32 q7, q9, %e15[0] \n"
"vld1.f32 {d16-d19}, [%3] \n"
"add %3, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vext.32 q12, q8, q9, #3 \n"
"vmla.f32 q7, q8, %e15[1] \n"
"vmla.f32 q13, q10, %f15[0] \n"
"pld [%4, #256] \n"
"vmla.f32 q14, q11, %f15[1] \n"
"vmla.f32 q15, q12, %e16[0] \n"
"vmla.f32 q7, q9, %e16[1] \n"
"vld1.f32 {d16-d19}, [%4] \n"
"add %4, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vext.32 q12, q8, q9, #3 \n"
"vmla.f32 q7, q8, %f16[0] \n"
"vmla.f32 q13, q10, %f16[1] \n"
"pld [%5, #256] \n"
"vmla.f32 q14, q11, %e17[0] \n"
"vmla.f32 q15, q12, %e17[1] \n"
"vmla.f32 q7, q9, %f17[0] \n"
"vld1.f32 {d16-d19}, [%5] \n"
"add %5, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vext.32 q12, q8, q9, #3 \n"
"vmla.f32 q7, q8, %f17[1] \n"
"vmla.f32 q13, q10, %e18[0] \n"
"pld [%6, #256] \n"
"vmla.f32 q14, q11, %e18[1] \n"
"vmla.f32 q15, q12, %f18[0] \n"
"vmla.f32 q7, q9, %f18[1] \n"
"vld1.f32 {d16-d19}, [%6] \n"
"add %6, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vext.32 q12, q8, q9, #3 \n"
"vmla.f32 q7, q8, %e19[0] \n"
"vmla.f32 q13, q10, %e19[1] \n"
"vmla.f32 q14, q11, %f19[0] \n"
"vmla.f32 q15, q12, %f19[1] \n"
"vmla.f32 q7, q9, %e20[0] \n"
"vadd.f32 q14, q14, q15 \n"
"vadd.f32 q7, q7, q13 \n"
// "veor q15, q15 \n"// _sum3 = 0;
"pld [%2, #256] \n"
"vadd.f32 q7, q7, q14 \n"
"vld1.f32 {d16-d19}, [%2] \n"// _r00 = vld1q_f32(r0+j);
"add %2, #16 \n"
"vst1.f32 {d14-d15}, [%1]! \n"
"pld [%1, #128] \n"
"subs %0, #1 \n"
"bne 0b \n"
"sub %2, #16 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424) // %20
: "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// Scalar tail for the single-row case.
for (; remain>0; remain--)
{
float sum = 0;
#if __ARM_NEON
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _sum = vmulq_f32(_r0, _k0123);
float32x4_t _r1 = vld1q_f32(r1);
_sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));
float32x4_t _r2 = vld1q_f32(r2);
_sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));
float32x4_t _r3 = vld1q_f32(r3);
_sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3));
float32x4_t _r4 = vld1q_f32(r4);
_sum = vmlaq_f32(_sum, _r4, _k20212223);
// Column 4 of rows 0..3 gathered into one vector; row 4's fifth element
// is assigned directly into sum (overwriting the initial 0 by design).
float32x4_t _k_t4;
_k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
_k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
_k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
_k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);
float32x4_t _r_t4;
_r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
_r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
_r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
_r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);
_sum = vmlaq_f32(_sum, _r_t4, _k_t4);
sum = r4[4] * k4[4];
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
sum += vget_lane_f32(_ss, 0);
#else
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
#endif
*outptr += sum;
r0++;
r1++;
r2++;
r3++;
r4++;
outptr++;
}
// Only the 4-pixel row overhang to skip here (one output row consumed).
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
}
}
}
}
// conv5x5s2_neon: 5x5 convolution, stride 2, float32.  One output row per
// iteration; the NEON path uses vld2 de-interleaving loads to split each
// input row into even/odd columns so that four stride-2 windows can be
// evaluated per vector step.
static void conv5x5s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// After producing outw outputs a row pointer has advanced 2*outw floats;
// tailstep re-aligns it to the start of the next stride-2 row pair:
// (w - 2*outw) skips the remainder of the current row, +w skips one row.
const int tailstep = w - 2*outw + w;
// Mat converts implicitly to const float*.
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q=0; q<inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
// 5x5 kernel for output channel p, input channel q.
const float* kernel0 = kernel + p*inch*25 + q*25;
// r0..r4: the five input rows contributing to the current output row.
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
const float* r4 = img0 + w*4;
// k0..k4: the five kernel rows (used by the scalar tail).
const float* k0 = kernel0;
const float* k1 = kernel0 + 5;
const float* k2 = kernel0 + 10;
const float* k3 = kernel0 + 15;
const float* k4 = kernel0 + 20;
#if __ARM_NEON
// Kernel weights preloaded as overlapping 4-lane vectors (flat indices in
// the names); _k24242424 broadcasts the last weight.
float32x4_t _k0123 = vld1q_f32(kernel0);
float32x4_t _k4567 = vld1q_f32(kernel0+4);
float32x4_t _k891011 = vld1q_f32(kernel0+8);
float32x4_t _k12131415 = vld1q_f32(kernel0+12);
float32x4_t _k16171819 = vld1q_f32(kernel0+16);
float32x4_t _k20212223 = vld1q_f32(kernel0+20);
float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);
#endif // __ARM_NEON
for (int i = 0; i < outh; i++)
{
#if __ARM_NEON
// nn = 4-wide vector groups; remain = scalar leftover columns.
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
// A64 kernel: ld2 splits each row into even (v8) / odd (v9) columns; ext
// builds the shifted even/odd streams for kernel columns 2..4.
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v8.4s, v9.4s}, [%2], #32 \n"// v8 = 0 2 4 6 q9 = 1 3 5 7
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v10.4s, v11.4s}, [%2] \n"// v10 = 8 10 12 14 v11 = 9 11 13 15
"prfm pldl1keep, [%1, #128] \n"
"0: \n"
"ld1 {v7.4s}, [%1] \n" // v7 = outptr
"ext v12.16b, v8.16b, v10.16b, #4 \n" // v12 = 2 4 6 8
"ext v11.16b, v9.16b, v11.16b, #4 \n" // v11 = 3 5 7 9
"ext v10.16b, v8.16b, v10.16b, #8 \n" // v10 = 4 6 8 10
"fmla v7.4s, v8.4s, %14.s[0] \n"
"fmul v13.4s, v9.4s, %14.s[1] \n"
"prfm pldl1keep, [%3, #256] \n"
"fmul v14.4s, v12.4s, %14.s[2] \n"
"fmul v15.4s, v11.4s, %14.s[3] \n"
"fmla v7.4s, v10.4s, %15.s[0] \n"
"ld2 {v8.4s, v9.4s}, [%3], #32 \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v10.4s, v11.4s}, [%3] \n"
"ext v12.16b, v8.16b, v10.16b, #4 \n"
"ext v11.16b, v9.16b, v11.16b, #4 \n"
"ext v10.16b, v8.16b, v10.16b, #8 \n"
"fmla v7.4s, v8.4s, %15.s[1] \n"
"fmla v13.4s, v9.4s, %15.s[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"fmla v14.4s, v12.4s, %15.s[3] \n"
"fmla v15.4s, v11.4s, %16.s[0] \n"
"fmla v7.4s, v10.4s, %16.s[1] \n"
"ld2 {v8.4s, v9.4s}, [%4], #32 \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v10.4s, v11.4s}, [%4] \n"
"ext v12.16b, v8.16b, v10.16b, #4 \n"
"ext v11.16b, v9.16b, v11.16b, #4 \n"
"ext v10.16b, v8.16b, v10.16b, #8 \n"
"fmla v7.4s, v8.4s, %16.s[2] \n"
"fmla v13.4s, v9.4s, %16.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"fmla v14.4s, v12.4s, %17.s[0] \n"
"fmla v15.4s, v11.4s, %17.s[1] \n"
"fmla v7.4s, v10.4s, %17.s[2] \n"
"ld2 {v8.4s, v9.4s}, [%5], #32 \n"
"prfm pldl1keep, [%5, #256] \n"
"ld2 {v10.4s, v11.4s}, [%5] \n"
"ext v12.16b, v8.16b, v10.16b, #4 \n"
"ext v11.16b, v9.16b, v11.16b, #4 \n"
"ext v10.16b, v8.16b, v10.16b, #8 \n"
"fmla v7.4s, v8.4s, %17.s[3] \n"
"fmla v13.4s, v9.4s, %18.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"fmla v14.4s, v12.4s, %18.s[1] \n"
"fmla v15.4s, v11.4s, %18.s[2] \n"
"fmla v7.4s, v10.4s, %18.s[3] \n"
"ld2 {v8.4s, v9.4s}, [%6], #32 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld2 {v10.4s, v11.4s}, [%6] \n"
"ext v12.16b, v8.16b, v10.16b, #4 \n"
"ext v11.16b, v9.16b, v11.16b, #4 \n"
"ext v10.16b, v8.16b, v10.16b, #8 \n"
"fmla v7.4s, v8.4s, %19.s[0] \n"
"fmla v13.4s, v9.4s, %19.s[1] \n"
"fmla v14.4s, v12.4s, %19.s[2] \n"
"fmla v15.4s, v11.4s, %19.s[3] \n"
"fmla v7.4s, v10.4s, %20.s[0] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v8.4s, v9.4s}, [%2], #32 \n"
"fadd v14.4s, v14.4s, v15.4s \n"
"fadd v7.4s, v7.4s, v13.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"fadd v7.4s, v7.4s, v14.4s \n"
"ld2 {v10.4s, v11.4s}, [%2] \n"
"st1 {v7.4s}, [%1], #16 \n"
"prfm pldl1keep, [%1, #128] \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
"sub %2, %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424) // %20
: "cc", "memory", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else
// ARMv7 NEON variant of the same stride-2 kernel (vld2 / vext on q regs).
if (nn > 0)
{
asm volatile(
// "veor q15, q15 \n"// _sump3 = 0;
// "veor q13, q13 \n"// _sump2 = 0;
// "veor q14, q14 \n"// _sump3 = 0;
"pld [%2, #256] \n"
"vld2.f32 {d16-d19}, [%2]! \n"// q8 = 0 2 4 6 q9 = 1 3 5 7
"pld [%2, #256] \n"
"vld2.f32 {d20-d23}, [%2] \n"// q10 = 8 10 12 14 q11 = 9 11 13 15
"pld [%1, #128] \n"
"0: \n"
"vld1.f32 {d14-d15}, [%1] \n"// q7 = outptr
"vext.32 q12, q8, q10, #1 \n"// q12 = 2 4 6 8
"vext.32 q11, q9, q11, #1 \n"// q11 = 3 5 7 9
"vext.32 q10, q8, q10, #2 \n"// q10 = 4 6 8 10
"vmla.f32 q7, q8, %e14[0] \n"
"vmul.f32 q13, q9, %e14[1] \n"
"pld [%3, #256] \n"
"vmul.f32 q14, q12, %f14[0] \n"
"vmul.f32 q15, q11, %f14[1] \n"
"vmla.f32 q7, q10, %e15[0] \n"
"vld2.f32 {d16-d19}, [%3]! \n"
"pld [%3, #256] \n"
"vld2.f32 {d20-d23}, [%3] \n"
"vext.32 q12, q8, q10, #1 \n"
"vext.32 q11, q9, q11, #1 \n"
"vext.32 q10, q8, q10, #2 \n"
"vmla.f32 q7, q8, %e15[1] \n"
"vmla.f32 q13, q9, %f15[0] \n"
"pld [%4, #256] \n"
"vmla.f32 q14, q12, %f15[1] \n"
"vmla.f32 q15, q11, %e16[0] \n"
"vmla.f32 q7, q10, %e16[1] \n"
"vld2.f32 {d16-d19}, [%4]! \n"
"pld [%4, #256] \n"
"vld2.f32 {d20-d23}, [%4] \n"
"vext.32 q12, q8, q10, #1 \n"
"vext.32 q11, q9, q11, #1 \n"
"vext.32 q10, q8, q10, #2 \n"
"vmla.f32 q7, q8, %f16[0] \n"
"vmla.f32 q13, q9, %f16[1] \n"
"pld [%5, #256] \n"
"vmla.f32 q14, q12, %e17[0] \n"
"vmla.f32 q15, q11, %e17[1] \n"
"vmla.f32 q7, q10, %f17[0] \n"
"vld2.f32 {d16-d19}, [%5]! \n"
"pld [%5, #256] \n"
"vld2.f32 {d20-d23}, [%5] \n"
"vext.32 q12, q8, q10, #1 \n"
"vext.32 q11, q9, q11, #1 \n"
"vext.32 q10, q8, q10, #2 \n"
"vmla.f32 q7, q8, %f17[1] \n"
"vmla.f32 q13, q9, %e18[0] \n"
"pld [%6, #256] \n"
"vmla.f32 q14, q12, %e18[1] \n"
"vmla.f32 q15, q11, %f18[0] \n"
"vmla.f32 q7, q10, %f18[1] \n"
"vld2.f32 {d16-d19}, [%6]! \n"
"pld [%6, #256] \n"
"vld2.f32 {d20-d23}, [%6] \n"
"vext.32 q12, q8, q10, #1 \n"
"vext.32 q11, q9, q11, #1 \n"
"vext.32 q10, q8, q10, #2 \n"
"vmla.f32 q7, q8, %e19[0] \n"
"vmla.f32 q13, q9, %e19[1] \n"
"vmla.f32 q14, q12, %f19[0] \n"
"vmla.f32 q15, q11, %f19[1] \n"
"vmla.f32 q7, q10, %e20[0] \n"
"pld [%2, #256] \n"
"vld2.f32 {d16-d19}, [%2]! \n"// q8 = 0 2 4 6 q9 = 1 3 5 7
"vadd.f32 q14, q14, q15 \n"
"vadd.f32 q7, q7, q13 \n"
// "veor q15, q15 \n"// _sump3 = 0;
// "veor q13, q13 \n"// _sump2 = 0;
"pld [%2, #256] \n"
"vadd.f32 q7, q7, q14 \n"
"vld2.f32 {d20-d23}, [%2] \n"// q10 = 8 10 12 14 q11 = 9 11 13 15
// "veor q14, q14 \n"// _sump3 = 0;
"vst1.f32 {d14-d15}, [%1]! \n"
"pld [%1, #128] \n"
"subs %0, #1 \n"
"bne 0b \n"
"sub %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424) // %20
: "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// Scalar tail: one output column per iteration; input advances by 2.
for (; remain>0; remain--)
{
float sum = 0;
#if __ARM_NEON
// Columns 0..3 of each kernel row via 4-lane dot products; the fifth
// column of every row is accumulated in scalar form.
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _sum = vmulq_f32(_r0, _k0123);
float32x4_t _r1 = vld1q_f32(r1);
_sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));
float32x4_t _r2 = vld1q_f32(r2);
_sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));
float32x4_t _r3 = vld1q_f32(r3);
_sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3));
float32x4_t _r4 = vld1q_f32(r4);
_sum = vmlaq_f32(_sum, _r4, _k20212223);
sum += r0[4] * k0[4];
sum += r1[4] * k1[4];
sum += r2[4] * k2[4];
sum += r3[4] * k3[4];
sum += r4[4] * k4[4];
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
sum += vget_lane_f32(_ss, 0);
#else
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
#endif
*outptr += sum;
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
r4 += 2;
outptr++;
}
// Re-align row pointers to the next stride-2 input row (see tailstep).
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
}
}
|
ft.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - FT
This benchmark is an OpenMP C version of the NPB FT code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: D. Bailey
W. Saphir
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
#include <stdint.h>
#include "npb-C.h"
/* global variables */
#include "global.h"
/* function declarations */
static void evolve(dcomplex u0[NZ][NY][NX], dcomplex u1[NZ][NY][NX],
int t, int indexmap[NZ][NY][NX], int d[3]);
static void compute_initial_conditions(dcomplex u0[NZ][NY][NX], int d[3]);
static void ipow46(double a, int exponent, double *result);
static void setup(void);
static void compute_indexmap(int indexmap[NZ][NY][NX], int d[3]);
static void print_timers(void);
static void fft(int dir, dcomplex x1[NZ][NY][NX], dcomplex x2[NZ][NY][NX]);
static void cffts1(int is, int d[3], dcomplex x[NZ][NY][NX],
dcomplex xout[NZ][NY][NX],
dcomplex y0[NX][FFTBLOCKPAD],
dcomplex y1[NX][FFTBLOCKPAD]);
static void cffts2(int is, int d[3], dcomplex x[NZ][NY][NX],
dcomplex xout[NZ][NY][NX],
dcomplex y0[NX][FFTBLOCKPAD],
dcomplex y1[NX][FFTBLOCKPAD]);
static void cffts3(int is, int d[3], dcomplex x[NZ][NY][NX],
dcomplex xout[NZ][NY][NX],
dcomplex y0[NX][FFTBLOCKPAD],
dcomplex y1[NX][FFTBLOCKPAD]);
static void fft_init (int n);
static void cfftz (int is, int m, int n, dcomplex x[NX][FFTBLOCKPAD],
dcomplex y[NX][FFTBLOCKPAD]);
static void fftz2 (int is, int l, int m, int n, int ny, int ny1,
dcomplex u[NX], dcomplex x[NX][FFTBLOCKPAD],
dcomplex y[NX][FFTBLOCKPAD]);
static int ilog2(int n);
static void checksum(int i, dcomplex u1[NZ][NY][NX], int d[3]);
static void verify (int d1, int d2, int d3, int nt,
boolean *verified, char *class);
/*--------------------------------------------------------------------
c FT benchmark
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c FT benchmark driver, run as a thread entry point.
c carg carries the requested OpenMP thread count packed into a void*.
c Bug fixes: the function is declared int but previously fell off the
c end without returning (UB if the caller reads the value); the final
c printf used %d for an unsigned argument (now %u).
c-------------------------------------------------------------------*/
static int realmain(void *carg)
{
    unsigned arg = (uintptr_t)carg;
    int i;
    /*--------------------------------------------------------------------
    c u0, u1, u2 are the main arrays in the problem; indexmap maps
    c (i,j,k) of u0 to the correct i^2+j^2+k^2 for the time-evolution
    c operator. They are static so the large data lives off this
    c thread's stack; the pad arrays separate them to avoid accidental
    c cache conflicts, since all array sizes are powers of two.
    c-------------------------------------------------------------------*/
    static dcomplex u0[NZ][NY][NX];
    static dcomplex pad1[3];
    static dcomplex u1[NZ][NY][NX];
    static dcomplex pad2[3];
    static dcomplex u2[NZ][NY][NX];
    static dcomplex pad3[3];
    static int indexmap[NZ][NY][NX];
    int iter;
    int nthreads = 1;
    double total_time, mflops;
    boolean verified;
    char class;
    omp_set_num_threads(arg);
    /*--------------------------------------------------------------------
    c Run the entire problem once to make sure all data is touched.
    c This reduces variable startup costs, which is important for such a
    c short benchmark.
    c-------------------------------------------------------------------*/
    for (i = 0; i < T_MAX; i++) {
        timer_clear(i);
    }
    setup();
#pragma omp parallel
    {
        compute_indexmap(indexmap, dims[2]);
#pragma omp single
        {
            compute_initial_conditions(u1, dims[0]);
            fft_init (dims[0][0]);
        }
        fft(1, u1, u0);
    } /* end parallel */
    /*--------------------------------------------------------------------
    c Start over from the beginning. Note that all operations must
    c be timed, in contrast to other benchmarks.
    c-------------------------------------------------------------------*/
    for (i = 0; i < T_MAX; i++) {
        timer_clear(i);
    }
    timer_start(T_TOTAL);
    if (TIMERS_ENABLED == TRUE) timer_start(T_SETUP);
#pragma omp parallel private(iter) firstprivate(niter)
    {
        compute_indexmap(indexmap, dims[2]);
#pragma omp single
        {
            compute_initial_conditions(u1, dims[0]);
            fft_init (dims[0][0]);
        }
        if (TIMERS_ENABLED == TRUE) {
#pragma omp master
            timer_stop(T_SETUP);
        }
        if (TIMERS_ENABLED == TRUE) {
#pragma omp master
            timer_start(T_FFT);
        }
        fft(1, u1, u0);
        if (TIMERS_ENABLED == TRUE) {
#pragma omp master
            timer_stop(T_FFT);
        }
        /* Main timestep loop: evolve in Fourier space, inverse
           transform, then fold this step's checksum into sums[]. */
        for (iter = 1; iter <= niter; iter++) {
            if (TIMERS_ENABLED == TRUE) {
#pragma omp master
                timer_start(T_EVOLVE);
            }
            evolve(u0, u1, iter, indexmap, dims[0]);
            if (TIMERS_ENABLED == TRUE) {
#pragma omp master
                timer_stop(T_EVOLVE);
            }
            if (TIMERS_ENABLED == TRUE) {
#pragma omp master
                timer_start(T_FFT);
            }
            fft(-1, u1, u2);
            if (TIMERS_ENABLED == TRUE) {
#pragma omp master
                timer_stop(T_FFT);
            }
            if (TIMERS_ENABLED == TRUE) {
#pragma omp master
                timer_start(T_CHECKSUM);
            }
            checksum(iter, u2, dims[0]);
            if (TIMERS_ENABLED == TRUE) {
#pragma omp master
                timer_stop(T_CHECKSUM);
            }
        }
#pragma omp single
        verify(NX, NY, NZ, niter, &verified, &class);
#if defined(_OPENMP)
#pragma omp master
        nthreads = omp_get_num_threads();
#endif /* _OPENMP */
    } /* end parallel */
    timer_stop(T_TOTAL);
    total_time = timer_read(T_TOTAL);
    /* Empirical FT operation-count model from the NPB reports. */
    if( total_time != 0.0) {
        mflops = 1.0e-6*(double)(NTOTAL) *
            (14.8157+7.19641*log((double)(NTOTAL))
             + (5.23518+7.21113*log((double)(NTOTAL)))*niter)
            /total_time;
    } else {
        mflops = 0.0;
    }
#ifdef BOMP
    backend_create_time(arg);
#endif
    /* fixed: arg is unsigned, so print with %u (was %d) */
    printf("Computetime %u %f\n", arg, total_time);
    printf("client done\n");
    if (TIMERS_ENABLED == TRUE) print_timers();
    return 0;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c evolve: advance u0 -> u1 by t time steps in Fourier space, scaling
c each mode by the precomputed exponential ex[t * indexmap].
c Must be called from inside an OpenMP parallel region (omp for).
c-------------------------------------------------------------------*/
static void evolve(dcomplex u0[NZ][NY][NX], dcomplex u1[NZ][NY][NX],
                   int t, int indexmap[NZ][NY][NX], int d[3]) {
    int i, j, k;
#pragma omp for
    for (k = 0; k < d[2]; k++) {
        for (j = 0; j < d[1]; j++) {
            /* Hoist the row base pointers; per-element math unchanged. */
            dcomplex *src = u0[k][j];
            dcomplex *dst = u1[k][j];
            int *map = indexmap[k][j];
            for (i = 0; i < d[0]; i++) {
                crmul(dst[i], src[i], ex[t * map[i]]);
            }
        }
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Fill in array u0 with initial conditions from the NPB pseudo-random
c number generator. The RNG stream position is advanced plane by
c plane, so the randlc/vranlc call order below must not be changed.
c (The d parameter is unused; the global dims[0] is read directly.)
c-------------------------------------------------------------------*/
static void compute_initial_conditions(dcomplex u0[NZ][NY][NX], int d[3]) {
    int k;
    double x0, start, an, dummy;
    /* Scratch row of random values; index is 1-based as in the
       Fortran original, hence the +1 and t starting at 1 below. */
    static double tmp[NX*2*MAXDIM+1];
    int i,j,t;
    start = SEED;
    /*--------------------------------------------------------------------
    c Jump to the starting element for our first plane.
    c-------------------------------------------------------------------*/
    ipow46(A, (zstart[0]-1)*2*NX*NY + (ystart[0]-1)*2*NX, &an);
    dummy = randlc(&start, an);
    /* an now holds the multiplier that skips one full z-plane. */
    ipow46(A, 2*NX*NY, &an);
    /*--------------------------------------------------------------------
    c Go through by z planes filling in one square at a time.
    c-------------------------------------------------------------------*/
    for (k = 0; k < dims[0][2]; k++) {
        x0 = start;
        vranlc(2*NX*dims[0][1], &x0, A, tmp);
        t = 1;
        for (j = 0; j < dims[0][1]; j++)
            for (i = 0; i < NX; i++) {
                u0[k][j][i].real = tmp[t++];
                u0[k][j][i].imag = tmp[t++];
            }
        /* NOTE(review): k runs 0..dims[0][2]-1, so this condition is
           always true and the stream is also advanced after the last
           plane; harmless since 'start' is not reused, but worth
           confirming against the 1-based Fortran original. */
        if (k != dims[0][2]) dummy = randlc(&start, an);
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c ipow46: compute a^exponent mod 2^46 via binary exponentiation,
c using randlc() as the mod-2^46 multiply. Uses
c   a^n = (a^(n/2))^2  when n is even,
c   a^n = a * a^(n-1)  when n is odd.
c-------------------------------------------------------------------*/
static void ipow46(double a, int exponent, double *result) {
    double discard, base, acc;
    int e;
    *result = 1;
    if (exponent == 0) return;
    base = a;
    acc = 1;
    e = exponent;
    while (e > 1) {
        if (e % 2 == 0) {
            /* square the base */
            discard = randlc(&base, base);
            e /= 2;
        } else {
            /* fold one factor into the accumulator */
            discard = randlc(&acc, base);
            e -= 1;
        }
    }
    /* one final multiply picks up the remaining base factor */
    discard = randlc(&acc, base);
    *result = acc;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c setup: print the benchmark banner and initialise the global layout
c tables (dims, x/y/zstart, x/y/zend) and the FFT blocking factors.
c-------------------------------------------------------------------*/
static void setup(void) {
    int axis;
    printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
           " - FT Benchmark\n\n");
    niter = NITER_DEFAULT;
    printf(" Size : %3dx%3dx%3d\n", NX, NY, NZ);
    printf(" Iterations : %7d\n", niter);
    /* Single-process run: every layout covers the whole grid, with
       1-based inclusive index ranges as in the Fortran original. */
    for (axis = 0; axis < 3; axis++) {
        dims[axis][0] = NX;
        dims[axis][1] = NY;
        dims[axis][2] = NZ;
        xstart[axis] = 1;
        xend[axis]   = NX;
        ystart[axis] = 1;
        yend[axis]   = NY;
        zstart[axis] = 1;
        zend[axis]   = NZ;
    }
    /*--------------------------------------------------------------------
    c Cache-blocking setup for the FFT sweeps: cffts1 blocks on the 2nd
    c dimension, cffts2/3 on the 1st. The pad keeps scratch columns off
    c power-of-two strides when a non-default block size is chosen.
    c-------------------------------------------------------------------*/
    fftblock = FFTBLOCK_DEFAULT;
    fftblockpad = FFTBLOCKPAD_DEFAULT;
    if (fftblock != FFTBLOCK_DEFAULT) fftblockpad = fftblock + 3;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c compute_indexmap: map local (i,j,k) to ibar^2+jbar^2+kbar^2 for the
c time-evolution exponent, then (one thread) precompute the table of
c exponentials ex[]. Must be called from inside an OpenMP parallel
c region: the omp for / omp single below are worksharing constructs.
c NOTE(review): the d parameter is unused; the loops read the global
c dims[2] directly.
c-------------------------------------------------------------------*/
static void compute_indexmap(int indexmap[NZ][NY][NX], int d[3]) {
    int i, j, k, ii, ii2, jj, ij2, kk;
    double ap;
    /*--------------------------------------------------------------------
    c basically we want to convert the fortran indices
    c 1 2 3 4 5 6 7 8
    c to
    c 0 1 2 3 -4 -3 -2 -1
    c The following magic formula does the trick:
    c mod(i-1+n/2, n) - n/2
    c-------------------------------------------------------------------*/
#pragma omp for
    for (i = 0; i < dims[2][0]; i++) {
        ii = (i+1+xstart[2]-2+NX/2)%NX - NX/2;
        ii2 = ii*ii;
        for (j = 0; j < dims[2][1]; j++) {
            jj = (j+1+ystart[2]-2+NY/2)%NY - NY/2;
            ij2 = jj*jj+ii2;
            for (k = 0; k < dims[2][2]; k++) {
                kk = (k+1+zstart[2]-2+NZ/2)%NZ - NZ/2;
                indexmap[k][j][i] = kk*kk+ij2;
            }
        }
    }
    /*--------------------------------------------------------------------
    c compute array of exponentials for time evolution: ex[n] = exp(ap)^n
    c built incrementally so ex[n] = ex[n-1]*ex[1]. Done by one thread;
    c the implicit barrier at the end of 'single' publishes ex[] to all.
    c-------------------------------------------------------------------*/
#pragma omp single
    {
        ap = - 4.0 * ALPHA * PI * PI;
        ex[0] = 1.0;
        ex[1] = exp(ap);
        for (i = 2; i <= EXPMAX; i++) {
            ex[i] = ex[i-1]*ex[1];
        }
    } /* end single */
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c print_timers: dump every section timer with a non-zero reading.
c Bug fix: the format string had a stray '(' where the closing ')'
c belongs ("timer %2d(%16s( :" -> "timer %2d(%16s) :").
c NOTE(review): tstrings has 7 entries; assumes T_MAX <= 7 — confirm
c against global.h.
c-------------------------------------------------------------------*/
static void print_timers(void) {
    int i;
    char *tstrings[] = { " total ",
                         " setup ",
                         " fft ",
                         " evolve ",
                         " checksum ",
                         " fftlow ",
                         " fftcopy " };
    for (i = 0; i < T_MAX; i++) {
        if (timer_read(i) != 0.0) {
            printf("timer %2d(%16s) :%10.6f\n", i, tstrings[i], timer_read(i));
        }
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c fft: full 3-D transform as three 1-D sweeps. args x1, x2 must be
c different arrays; the first two sweeps run in place in x1 and the
c last writes x2. The inverse runs the same sweeps in reverse order.
c-------------------------------------------------------------------*/
static void fft(int dir, dcomplex x1[NZ][NY][NX], dcomplex x2[NZ][NY][NX]) {
    /* scratch blocks for the cache-blocked 1-D FFTs */
    dcomplex scratch0[NX][FFTBLOCKPAD];
    dcomplex scratch1[NX][FFTBLOCKPAD];
    if (dir != 1) {
        /* inverse: z, y, then x */
        cffts3(-1, dims[2], x1, x1, scratch0, scratch1); /* x1 -> x1 */
        cffts2(-1, dims[1], x1, x1, scratch0, scratch1); /* x1 -> x1 */
        cffts1(-1, dims[0], x1, x2, scratch0, scratch1); /* x1 -> x2 */
    } else {
        /* forward: x, y, then z */
        cffts1(1, dims[0], x1, x1, scratch0, scratch1);  /* x1 -> x1 */
        cffts2(1, dims[1], x1, x1, scratch0, scratch1);  /* x1 -> x1 */
        cffts3(1, dims[2], x1, x2, scratch0, scratch1);  /* x1 -> x2 */
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c cffts1: 1-D FFTs along the first (x) dimension, blocked over the
c second dimension in chunks of fftblock rows. Gathers a block into
c the contiguous scratch y0, transforms it, and scatters it back.
c Must be called from inside an OpenMP parallel region (omp for).
c-------------------------------------------------------------------*/
static void cffts1(int is, int d[3], dcomplex x[NZ][NY][NX],
                   dcomplex xout[NZ][NY][NX],
                   dcomplex y0[NX][FFTBLOCKPAD],
                   dcomplex y1[NX][FFTBLOCKPAD]) {
    int log2d[3];
    int i, j, k, jb;
    for (i = 0; i < 3; i++) {
        log2d[i] = ilog2(d[i]);
    }
#pragma omp for
    for (k = 0; k < d[2]; k++) {
        for (jb = 0; jb + fftblock <= d[1]; jb += fftblock) {
            /* gather: transpose a block of rows into scratch */
            for (j = 0; j < fftblock; j++)
                for (i = 0; i < d[0]; i++)
                    y0[i][j] = x[k][jb + j][i];
            cfftz(is, log2d[0], d[0], y0, y1);
            /* scatter the transformed block back */
            for (j = 0; j < fftblock; j++)
                for (i = 0; i < d[0]; i++)
                    xout[k][jb + j][i] = y0[i][j];
        }
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c cffts2: 1-D FFTs along the second (y) dimension, blocked over the
c first dimension in chunks of fftblock columns. Gathers a block into
c the contiguous scratch y0, transforms it, and scatters it back.
c Must be called from inside an OpenMP parallel region (omp for).
c-------------------------------------------------------------------*/
static void cffts2(int is, int d[3], dcomplex x[NZ][NY][NX],
                   dcomplex xout[NZ][NY][NX],
                   dcomplex y0[NX][FFTBLOCKPAD],
                   dcomplex y1[NX][FFTBLOCKPAD]) {
    int log2d[3];
    int i, j, k, ib;
    for (i = 0; i < 3; i++) {
        log2d[i] = ilog2(d[i]);
    }
#pragma omp for
    for (k = 0; k < d[2]; k++) {
        for (ib = 0; ib + fftblock <= d[0]; ib += fftblock) {
            /* gather a block of columns into scratch */
            for (j = 0; j < d[1]; j++)
                for (i = 0; i < fftblock; i++)
                    y0[j][i] = x[k][j][ib + i];
            cfftz(is, log2d[1], d[1], y0, y1);
            /* scatter the transformed block back */
            for (j = 0; j < d[1]; j++)
                for (i = 0; i < fftblock; i++)
                    xout[k][j][ib + i] = y0[j][i];
        }
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c cffts3: 1-D FFTs along the third (z) dimension, blocked over the
c first dimension in chunks of fftblock columns. Gathers a pencil of
c z-lines into the contiguous scratch y0, transforms it, and scatters
c it back. Must be called from inside an OpenMP parallel region.
c-------------------------------------------------------------------*/
static void cffts3(int is, int d[3], dcomplex x[NZ][NY][NX],
                   dcomplex xout[NZ][NY][NX],
                   dcomplex y0[NX][FFTBLOCKPAD],
                   dcomplex y1[NX][FFTBLOCKPAD]) {
    int log2d[3];
    int i, j, k, ib;
    for (i = 0; i < 3; i++) {
        log2d[i] = ilog2(d[i]);
    }
#pragma omp for
    for (j = 0; j < d[1]; j++) {
        for (ib = 0; ib + fftblock <= d[0]; ib += fftblock) {
            /* gather a pencil along z into scratch */
            for (k = 0; k < d[2]; k++)
                for (i = 0; i < fftblock; i++)
                    y0[k][i] = x[k][j][ib + i];
            cfftz(is, log2d[2], d[2], y0, y1);
            /* scatter the transformed pencil back */
            for (k = 0; k < d[2]; k++)
                for (i = 0; i < fftblock; i++)
                    xout[k][j][ib + i] = y0[k][i];
        }
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c fft_init: fill the global roots-of-unity table u[] so each FFT level
c reads its twiddle factors with stride-one accesses. u[0].real
c records log2(n); cfftz uses it later to validate its arguments.
c Level L (1-based) occupies u[offset .. offset+span-1] with
c span = 2^(L-1) and offset = 1 + 2^0 + ... + 2^(L-2).
c-------------------------------------------------------------------*/
static void fft_init (int n) {
    int level, idx, levels, offset, span;
    double step, angle;
    levels = ilog2(n);
    u[0].real = (double)levels;
    u[0].imag = 0.0;
    offset = 1;
    span = 1;
    for (level = 1; level <= levels; level++) {
        step = PI / span;
        for (idx = 0; idx < span; idx++) {
            angle = idx * step;
            u[offset + idx].real = cos(angle);
            u[offset + idx].imag = sin(angle);
        }
        offset += span;
        span *= 2;
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Computes NY N-point complex-to-complex FFTs of X using an algorithm due
c to Swarztrauber. X is both the input and the output array, while Y is a
c scratch array. It is assumed that N = 2^M. The twiddle table u[] must
c have been filled by fft_init() beforehand; u[0].real records the
c maximum usable M and is used to validate the arguments below.
c-------------------------------------------------------------------*/
static void cfftz (int is, int m, int n, dcomplex x[NX][FFTBLOCKPAD],
dcomplex y[NX][FFTBLOCKPAD]) {
    int i,j,l,mx;
    /*--------------------------------------------------------------------
    c Check if input parameters are invalid.
    c-------------------------------------------------------------------*/
    mx = (int)(u[0].real);
    if ((is != 1 && is != -1) || m < 1 || m > mx) {
        printf("CFFTZ: Either U has not been initialized, or else\n"
               "one of the input parameters is invalid%5d%5d%5d\n",
               is, m, mx);
        exit(1);
    }
    /*--------------------------------------------------------------------
    c Perform one variant of the Stockham FFT: two butterfly levels per
    c iteration, ping-ponging the data between x and y.
    c-------------------------------------------------------------------*/
    for (l = 1; l <= m; l+=2) {
        fftz2 (is, l, m, n, fftblock, fftblockpad, u, x, y);
        if (l == m) break;  /* odd m: last level left its result in y */
        fftz2 (is, l + 1, m, n, fftblock, fftblockpad, u, y, x);
    }
    /*--------------------------------------------------------------------
    c Copy Y to X: for odd m the transform ended up in the scratch
    c array, so move it back; the caller always reads the result from x.
    c-------------------------------------------------------------------*/
    if (m % 2 == 1) {
        for (j = 0; j < n; j++) {
            for (i = 0; i < fftblock; i++) {
                x[j][i].real = y[j][i].real;
                x[j][i].imag = y[j][i].imag;
            }
        }
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Performs the L-th iteration of the second variant of the Stockham FFT:
c one radix-2 butterfly pass reading x and writing y across ny
c independent columns. is selects forward (+1) or inverse (twiddle
c conjugated) transform. ny1 is the padded column count; it is part of
c the call signature but not referenced in this body.
c-------------------------------------------------------------------*/
static void fftz2 (int is, int l, int m, int n, int ny, int ny1,
dcomplex u[NX], dcomplex x[NX][FFTBLOCKPAD],
dcomplex y[NX][FFTBLOCKPAD]) {
    int k,n1,li,lj,lk,ku,i,j,i11,i12,i21,i22;
    dcomplex u1,x11,x21;
    /*--------------------------------------------------------------------
    c Set initial parameters: lk = 2^(l-1) is the butterfly span and
    c li = 2^(m-l) the number of butterfly groups; the 2<<(p-1) form
    c avoids 1<<0 edge cases from the Fortran translation.
    c-------------------------------------------------------------------*/
    n1 = n / 2;
    if (l-1 == 0) {
        lk = 1;
    } else {
        lk = 2 << ((l - 1)-1);
    }
    if (m-l == 0) {
        li = 1;
    } else {
        li = 2 << ((m - l)-1);
    }
    lj = 2 * lk;
    ku = li;   /* twiddle factors for this level start at u[li] */
    for (i = 0; i < li; i++) {
        i11 = i * lk;     /* first input block */
        i12 = i11 + n1;   /* second input block */
        i21 = i * lj;     /* first output block */
        i22 = i21 + lk;   /* second output block */
        if (is >= 1) {
            u1.real = u[ku+i].real;
            u1.imag = u[ku+i].imag;
        } else {
            /* inverse transform: conjugate the twiddle factor */
            u1.real = u[ku+i].real;
            u1.imag = -u[ku+i].imag;
        }
        /*--------------------------------------------------------------------
        c This loop is vectorizable. Butterfly: the sum goes to the first
        c output block, the twiddled difference to the second.
        c-------------------------------------------------------------------*/
        for (k = 0; k < lk; k++) {
            for (j = 0; j < ny; j++) {
                double x11real, x11imag;
                double x21real, x21imag;
                x11real = x[i11+k][j].real;
                x11imag = x[i11+k][j].imag;
                x21real = x[i12+k][j].real;
                x21imag = x[i12+k][j].imag;
                y[i21+k][j].real = x11real + x21real;
                y[i21+k][j].imag = x11imag + x21imag;
                y[i22+k][j].real = u1.real * (x11real - x21real)
                    - u1.imag * (x11imag - x21imag);
                y[i22+k][j].imag = u1.real * (x11imag - x21imag)
                    + u1.imag * (x11real - x21real);
            }
        }
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c ilog2: ceiling of log2(n) for n >= 1 (exact for powers of two).
c Matches the original's result for every int, including the
c degenerate n <= 0 case (returns 1) and n == 1 (returns 0).
c-------------------------------------------------------------------*/
static int ilog2(int n) {
    int log2n, pow2;
    if (n == 1) {
        return 0;
    }
    log2n = 1;
    for (pow2 = 2; pow2 < n; pow2 <<= 1) {
        log2n++;
    }
    return log2n;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c checksum: sample 1024 pseudo-randomly chosen grid points of u1 and
c accumulate their normalized sum into the global sums[i]. Must be
c called from inside an OpenMP parallel region: the nowait omp for
c splits the sampling loop, each thread folds its partial sum into
c sums[i] under the critical section, the barrier makes all partial
c sums visible, and a single thread then normalizes by NTOTAL.
c-------------------------------------------------------------------*/
static void checksum(int i, dcomplex u1[NZ][NY][NX], int d[3]) {
    int j, q,r,s, ierr;
    dcomplex chk,allchk;
    chk.real = 0.0;
    chk.imag = 0.0;
#pragma omp for nowait
    for (j = 1; j <= 1024; j++) {
        /* 1-based sample coordinates, as in the Fortran original */
        q = j%NX+1;
        if (q >= xstart[0] && q <= xend[0]) {
            r = (3*j)%NY+1;
            if (r >= ystart[0] && r <= yend[0]) {
                s = (5*j)%NZ+1;
                if (s >= zstart[0] && s <= zend[0]) {
                    cadd(chk,chk,u1[s-zstart[0]][r-ystart[0]][q-xstart[0]]);
                }
            }
        }
    }
#pragma omp critical
    {
        sums[i].real += chk.real;
        sums[i].imag += chk.imag;
    }
#pragma omp barrier
#pragma omp single
    {
        /* complex % real */
        sums[i].real = sums[i].real/(double)(NTOTAL);
        sums[i].imag = sums[i].imag/(double)(NTOTAL);
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void verify (int d1, int d2, int d3, int nt,
boolean *verified, char *class) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int ierr, size, i;
double err, epsilon;
/*--------------------------------------------------------------------
c Sample size reference checksums
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Class S size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_s[6+1] = { 0.0,
5.546087004964e+02,
5.546385409189e+02,
5.546148406171e+02,
5.545423607415e+02,
5.544255039624e+02,
5.542683411902e+02 };
double vdata_imag_s[6+1] = { 0.0,
4.845363331978e+02,
4.865304269511e+02,
4.883910722336e+02,
4.901273169046e+02,
4.917475857993e+02,
4.932597244941e+02 };
/*--------------------------------------------------------------------
c Class W size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_w[6+1] = { 0.0,
5.673612178944e+02,
5.631436885271e+02,
5.594024089970e+02,
5.560698047020e+02,
5.530898991250e+02,
5.504159734538e+02 };
double vdata_imag_w[6+1] = { 0.0,
5.293246849175e+02,
5.282149986629e+02,
5.270996558037e+02,
5.260027904925e+02,
5.249400845633e+02,
5.239212247086e+02 };
/*--------------------------------------------------------------------
c Class A size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_a[6+1] = { 0.0,
5.046735008193e+02,
5.059412319734e+02,
5.069376896287e+02,
5.077892868474e+02,
5.085233095391e+02,
5.091487099959e+02 };
double vdata_imag_a[6+1] = { 0.0,
5.114047905510e+02,
5.098809666433e+02,
5.098144042213e+02,
5.101336130759e+02,
5.104914655194e+02,
5.107917842803e+02 };
/*--------------------------------------------------------------------
c Class B size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_b[20+1] = { 0.0,
5.177643571579e+02,
5.154521291263e+02,
5.146409228649e+02,
5.142378756213e+02,
5.139626667737e+02,
5.137423460082e+02,
5.135547056878e+02,
5.133910925466e+02,
5.132470705390e+02,
5.131197729984e+02,
5.130070319283e+02,
5.129070537032e+02,
5.128182883502e+02,
5.127393733383e+02,
5.126691062020e+02,
5.126064276004e+02,
5.125504076570e+02,
5.125002331720e+02,
5.124551951846e+02,
5.124146770029e+02 };
double vdata_imag_b[20+1] = { 0.0,
5.077803458597e+02,
5.088249431599e+02,
5.096208912659e+02,
5.101023387619e+02,
5.103976610617e+02,
5.105948019802e+02,
5.107404165783e+02,
5.108576573661e+02,
5.109577278523e+02,
5.110460304483e+02,
5.111252433800e+02,
5.111968077718e+02,
5.112616233064e+02,
5.113203605551e+02,
5.113735928093e+02,
5.114218460548e+02,
5.114656139760e+02,
5.115053595966e+02,
5.115415130407e+02,
5.115744692211e+02 };
/*--------------------------------------------------------------------
c Class C size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_c[20+1] = { 0.0,
5.195078707457e+02,
5.155422171134e+02,
5.144678022222e+02,
5.140150594328e+02,
5.137550426810e+02,
5.135811056728e+02,
5.134569343165e+02,
5.133651975661e+02,
5.132955192805e+02,
5.132410471738e+02,
5.131971141679e+02,
5.131605205716e+02,
5.131290734194e+02,
5.131012720314e+02,
5.130760908195e+02,
5.130528295923e+02,
5.130310107773e+02,
5.130103090133e+02,
5.129905029333e+02,
5.129714421109e+02 };
double vdata_imag_c[20+1] = { 0.0,
5.149019699238e+02,
5.127578201997e+02,
5.122251847514e+02,
5.121090289018e+02,
5.121143685824e+02,
5.121496764568e+02,
5.121870921893e+02,
5.122193250322e+02,
5.122454735794e+02,
5.122663649603e+02,
5.122830879827e+02,
5.122965869718e+02,
5.123075927445e+02,
5.123166486553e+02,
5.123241541685e+02,
5.123304037599e+02,
5.123356167976e+02,
5.123399592211e+02,
5.123435588985e+02,
5.123465164008e+02 };
epsilon = 1.0e-12;
*verified = TRUE;
*class = 'U';
if (d1 == 64 &&
d2 == 64 &&
d3 == 64 &&
nt == 6) {
*class = 'S';
for (i = 1; i <= nt; i++) {
err = (get_real(sums[i]) - vdata_real_s[i]) / vdata_real_s[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
err = (get_imag(sums[i]) - vdata_imag_s[i]) / vdata_imag_s[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
}
} else if (d1 == 128 &&
d2 == 128 &&
d3 == 32 &&
nt == 6) {
*class = 'W';
for (i = 1; i <= nt; i++) {
err = (get_real(sums[i]) - vdata_real_w[i]) / vdata_real_w[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
err = (get_imag(sums[i]) - vdata_imag_w[i]) / vdata_imag_w[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
}
} else if (d1 == 256 &&
d2 == 256 &&
d3 == 128 &&
nt == 6) {
*class = 'A';
for (i = 1; i <= nt; i++) {
err = (get_real(sums[i]) - vdata_real_a[i]) / vdata_real_a[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
err = (get_imag(sums[i]) - vdata_imag_a[i]) / vdata_imag_a[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
}
} else if (d1 == 512 &&
d2 == 256 &&
d3 == 256 &&
nt == 20) {
*class = 'B';
for (i = 1; i <= nt; i++) {
err = (get_real(sums[i]) - vdata_real_b[i]) / vdata_real_b[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
err = (get_imag(sums[i]) - vdata_imag_b[i]) / vdata_imag_b[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
}
} else if (d1 == 512 &&
d2 == 512 &&
d3 == 512 &&
nt == 20) {
*class = 'C';
for (i = 1; i <= nt; i++) {
err = (get_real(sums[i]) - vdata_real_c[i]) / vdata_real_c[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
err = (get_imag(sums[i]) - vdata_imag_c[i]) / vdata_imag_c[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
}
}
if (*class != 'U') {
printf("Result verification successful\n");
} else {
printf("Result verification failed\n");
}
printf("class = %1c\n", *class);
}
#define STACK_SIZE (8 * 1024 * 1024)
/*--------------------------------------------------------------------
c Entry point: parse the requested thread count and hand off to
c realmain(). Bug fix: the non-BOMP path passed a plain int where
c realmain() expects a void*; route it through uintptr_t exactly as
c the BOMP path packs it.
c-------------------------------------------------------------------*/
int main(int argc, char** argv)
{
    if (argc != 2) { /* Print usage */
        printf("Usage: %s <Number of threads>\n", argv[0]);
        exit(-1);
    }
#ifdef BOMP
    backend_span_domain(atoi(argv[1]), STACK_SIZE);
    bomp_custom_init();
    backend_thread_create_varstack(realmain, (void*)((uint64_t)atoi(argv[1])),
                                   STACK_SIZE);
    backend_thread_exit();
#else /* BOMP */
    realmain((void *)(uintptr_t)atoi(argv[1]));
#endif /* BOMP */
    return 0;
}
|
aln_run.c | #include "tldevel.h"
#include "tlrng.h"
#ifdef HAVE_OPENMP
#include <omp.h>
#endif
#include "msa.h"
#include "alignment_parameters.h"
#include "aln_task.h"
#include "aln_struct.h"
#include "aln_mem.h"
#include "aln_setup.h"
#include "aln_controller.h"
#define ALN_RUN_IMPORT
#include "aln_run.h"
static int score_aln(struct aln_mem* m,float** profile, struct msa* msa, int a,int b,int numseq,float* score);
static int SampleWithoutReplacement(struct rng_state* rng, int N, int n,int* samples);
static int int_cmp(const void *a, const void *b);
//static int do_align(struct msa* msa, struct aln_param* ap,struct aln_mem* m, int a,int b, int c);
static int do_align(struct msa* msa,struct aln_tasks* t,struct aln_mem* m, int task_id);
/* Execute pairwise alignment task `task_id` from the guide-tree task list.
 *
 * Inputs a = t->list[task_id]->a and b = ->b are sequence indices when
 * < msa->numseq, otherwise indices of previously computed profiles.  The
 * resulting alignment path is stored in t->map[c] and (except for the root
 * task) the merged profile in t->profile[c], where c = t->list[task_id]->c.
 * Also updates the msa bookkeeping for the new internal node c: plen (path
 * length), nsip (number of sequences in profile) and sip (sequence index
 * list).  The two input profiles are freed once consumed.
 *
 * Returns OK on success, FAIL on allocation/alignment error (via the
 * RUN/MMALLOC goto-ERROR macros from tldevel).
 */
int do_align(struct msa* msa,struct aln_tasks* t,struct aln_mem* m, int task_id)
{
        int a,b,c;
        int len_a;
        int len_b;
        int j,g;
        int numseq;

        a = t->list[task_id]->a;
        b = t->list[task_id]->b;
        c = t->list[task_id]->c;

        numseq = msa->numseq;
        /* Input length: raw sequence length for leaves, profile length otherwise. */
        if(a < numseq){
                len_a = msa->sequences[a]->len;
        }else{
                len_a = msa->plen[a];
        }
        if(b < numseq){
                len_b = msa->sequences[b]->len;
        }else{
                len_b = msa->plen[b];
        }

        /* The path buffer must hold the longer of the two inputs (+2 for the
           length/score header cells used by the path format). */
        g = (len_a > len_b)? len_a:len_b;
        MMALLOC(t->map[c],sizeof(int) * (g+2));
        RUN(resize_aln_mem(m, g));
        /* I should not need to do this (defensive: pre-mark all path cells unset). */
        for (j = 0; j < (g+2);j++){
                t->map[c][j] = -1;
        }

        /* Leaves get a fresh profile built from the sequence; existing
           profiles get their gap penalties rescaled by the size of the
           partner profile. */
        if (a < numseq){
                RUN(make_profile_n(m->ap, msa->sequences[a]->s,len_a,&t->profile[a]));
        }else{
                RUN(set_gap_penalties_n(t->profile[a],len_a,msa->nsip[b]));
        }
        if (b < numseq){
                RUN(make_profile_n(m->ap, msa->sequences[b]->s,len_b,&t->profile[b]));
        }else{
                RUN(set_gap_penalties_n(t->profile[b],len_b,msa->nsip[a]));
        }

        init_alnmem(m, len_a, len_b);
        /* Four cases below: seq/seq, seq/profile, profile/seq, profile/profile.
           The aligner expects the profile (or the shorter profile) as input 1;
           when inputs are swapped to satisfy that, the computed path is
           mirrored back afterwards with mirror_path_n(). */
        if(a < numseq){
                if(b < numseq){
                        /* sequence vs sequence */
                        m->seq1 = msa->sequences[a]->s;
                        m->seq2 = msa->sequences[b]->s;
                        m->prof1 = NULL;
                        m->prof2 = NULL;
                        m->mode = ALN_MODE_FULL;
#ifdef HAVE_OPENMP
                        /* aln_runner spawns OpenMP tasks internally; a single
                           thread enters and fans out. */
#pragma omp parallel
#pragma omp single nowait
                        {
#endif
                                aln_runner(m, t->map[c]);
#ifdef HAVE_OPENMP
                        }
#endif
                }else{
                        /* sequence vs profile: swap so the profile is input 1. */
                        m->enda = len_b;
                        m->endb = len_a;
                        m->len_a = len_b;
                        m->len_b = len_a;
                        m->seq1 = NULL;
                        m->seq2 = msa->sequences[a]->s;
                        m->prof1 = t->profile[b];
                        m->prof2 = NULL;
                        m->sip = msa->nsip[b];
                        m->mode = ALN_MODE_FULL;
#ifdef HAVE_OPENMP
#pragma omp parallel
#pragma omp single nowait
                        {
#endif
                                aln_runner(m,t->map[c]);
#ifdef HAVE_OPENMP
                        }
#endif
                        /* Undo the input swap in the resulting path. */
                        RUN(mirror_path_n(&t->map[c],len_a,len_b));
                }
        }else{
                if(b < numseq){
                        /* profile vs sequence */
                        m->seq1 = NULL;
                        m->seq2 = msa->sequences[b]->s;
                        m->prof1 = t->profile[a];
                        m->prof2 = NULL;
                        m->sip = msa->nsip[a];
                        m->mode = ALN_MODE_FULL;
#ifdef HAVE_OPENMP
#pragma omp parallel
#pragma omp single nowait
                        {
#endif
                                aln_runner(m,t->map[c]);
#ifdef HAVE_OPENMP
                        }
#endif
                }else{
                        /* profile vs profile: put the shorter profile first. */
                        if(len_a < len_b){
                                m->seq1 = NULL;
                                m->seq2 = NULL;
                                m->prof1 = t->profile[a];
                                m->prof2 = t->profile[b];
                                m->mode = ALN_MODE_FULL;
                                /* NOTE(review): this branch has no OpenMP
                                   parallel/single wrapper, unlike its siblings
                                   — confirm whether that is intentional. */
                                aln_runner(m, t->map[c]);
                        }else{
                                m->enda = len_b;
                                m->endb = len_a;
                                m->len_a = len_b;
                                m->len_b = len_a;
                                m->seq1 = NULL;
                                m->seq2 = NULL;
                                m->prof1 = t->profile[b];
                                m->prof2 = t->profile[a];
                                m->mode = ALN_MODE_FULL;
#ifdef HAVE_OPENMP
#pragma omp parallel
#pragma omp single nowait
                                {
#endif
                                        aln_runner(m, t->map[c]);
#ifdef HAVE_OPENMP
                                }
#endif
                                RUN(mirror_path_n(&t->map[c],len_a,len_b));
                        }
                }
        }
        RUN(add_gap_info_to_path_n(&t->map[c], len_a, len_b));

        /* The final (root) task needs no merged profile — there is no
           further alignment to feed it into. */
        if(task_id != t->n_tasks-1){
                MMALLOC(t->profile[c],sizeof(float)*64*(t->map[c][0]+2));
                update_n(t->profile[a],t->profile[b],t->profile[c],m->ap,t->map[c],msa->nsip[a],msa->nsip[b]);
        }

        /* Bookkeeping for node c: path length, sequence count and the
           concatenated (reversed) sequence-index lists of both children. */
        msa->plen[c] = t->map[c][0];

        msa->nsip[c] = msa->nsip[a] + msa->nsip[b];
        MMALLOC(msa->sip[c],sizeof(int)*(msa->nsip[a] + msa->nsip[b]));
        g =0;
        for (j = msa->nsip[a];j--;){
                msa->sip[c][g] = msa->sip[a][j];
                g++;
        }
        for (j = msa->nsip[b];j--;){
                msa->sip[c][g] = msa->sip[b][j];
                g++;
        }
        /* Child profiles are consumed; free them now. */
        MFREE(t->profile[a]);
        MFREE(t->profile[b]);
        return OK;
ERROR:
        return FAIL;
}
#ifdef HAVE_OPENMP
/* Build the multiple sequence alignment by running the task list with
 * OpenMP.  Tasks sharing the same priority level (t->list[i]->p) are
 * independent and are executed in a parallel-for batch; batches run in
 * priority order.  Each thread gets its own aln_mem workspace.
 *
 * Returns OK on success, FAIL on allocation/alignment error.
 */
int create_msa_openMP(struct msa* msa, struct aln_param* ap,struct aln_tasks* t)
{
        int i,j,g,s;
        struct aln_mem** m = NULL;
        int n_threads = omp_get_max_threads();

        MMALLOC(m, sizeof(struct aln_mem*) * n_threads);
        /* BUGFIX: pre-set every slot to NULL before allocating.  Previously,
           if alloc_aln_mem() failed partway through, the ERROR path called
           free_aln_mem() on the remaining *uninitialized* pointers (UB). */
        for(i = 0; i < n_threads;i++){
                m[i] = NULL;
        }
        for(i = 0; i < n_threads;i++){
                RUN(alloc_aln_mem(&m[i], 2048));
                m[i]->ap = ap;
                m[i]->mode = ALN_MODE_FULL;
        }

        s = 0;
        g = t->list[0]->p;
        for(i = 0; i < t->n_tasks;i++){
                if(t->list[i]->p != g){
                        /* Tasks [s, i) all share priority g and are
                           independent of each other — run them in parallel. */
#pragma omp parallel for shared(msa,t,m,s,i) private(j)
                        for(j = s; j < i;j++){
                                int tid = omp_get_thread_num();
                                do_align(msa,t,m[tid],j);
                        }
                        g = t->list[i]->p;
                        s = i;
                }
        }
        /* Final batch (typically the last few merges near the tree root). */
        for(j = s; j < i;j++){
                do_align(msa,t,m[0],j);
        }

        for(i = 0; i < n_threads;i++){
                free_aln_mem(m[i]);
        }
        MFREE(m);
        return OK;
ERROR:
        if(m){
                for(i = 0; i < n_threads;i++){
                        if(m[i]){
                                free_aln_mem(m[i]);
                        }
                }
                MFREE(m);
        }
        return FAIL;
}
#endif
/* Build the multiple sequence alignment by executing the task list
 * sequentially with a single shared workspace.
 *
 * The task list is already ordered by priority, and a serial run processes
 * entries strictly in list order, so the per-priority batching used by the
 * OpenMP variant is unnecessary here: tasks 0 .. n_tasks-1 are simply
 * executed one after another — exactly the order the batched loops produce.
 *
 * Returns OK on success, FAIL on allocation error.
 */
int create_msa_serial(struct msa* msa, struct aln_param* ap,struct aln_tasks* t)
{
        struct aln_mem* workspace = NULL;
        int task;

        RUN(alloc_aln_mem(&workspace, 2048));
        workspace->ap = ap;
        workspace->mode = ALN_MODE_FULL;

        for(task = 0; task < t->n_tasks; task++){
                do_align(msa, t, workspace, task);
        }

        free_aln_mem(workspace);
        return OK;
ERROR:
        if(workspace){
                free_aln_mem(workspace);
        }
        return FAIL;
}
/* "Chaos" guide-tree construction: at each step, draw up to ap->chaos
 * candidate nodes at random (without replacement), score all candidate
 * pairs, and align the best-scoring pair.  The winner replaces one of its
 * inputs in the active list; the other is retired.
 *
 * Returns OK on success, FAIL on allocation/alignment error.
 */
int create_chaos_msa(struct msa* msa, struct aln_param* ap,struct aln_tasks* t)
{
        struct aln_mem* m = NULL;
        int i,g,f,a,b,l;
        int best_a, best_b;
        int* samples = NULL;
        int* active = NULL;
        float max_score;
        float score;
        int numseq;

        numseq = msa->numseq;

        /* BUGFIX: m->mode was assigned *before* alloc_aln_mem(), i.e. through
           a NULL pointer.  Allocate first, then configure. */
        RUN(alloc_aln_mem(&m, 2048));
        m->ap = ap;
        m->mode = ALN_MODE_FULL;

        MMALLOC(samples,sizeof(int) * m->ap->chaos);
        MMALLOC(active, sizeof(int) * numseq);
        for(i = 0; i < numseq;i++){
                active[i] = i;
        }
        /* int_cmp sorts descending; fresh profile ids (numseq+i) stay in
           front and retired slots (-1) sink to the end. */
        qsort(active, numseq, sizeof(int), int_cmp);

        for(i = 0; i < numseq-1;i++){
                /* Pick the best pair among l randomly sampled candidates. */
                max_score = -FLT_MAX;
                /* BUGFIX: initialize the winners — if chaos < 2 the pair loop
                   never runs and best_a/best_b were read uninitialized.  The
                   first two active entries serve as a defined fallback. */
                best_a = 0;
                best_b = 1;
                l = MACRO_MIN(ap->chaos, numseq-i);
                SampleWithoutReplacement(ap->rng, numseq-i, l, samples);
                for(g = 0;g < l-1;g++){
                        a = samples[g];
                        for(f = g + 1; f < l;f++){
                                b = samples[f];
                                score_aln(m, t->profile, msa, active[a], active[b], numseq, &score);
                                if(m->score > max_score){
                                        best_a = a;
                                        best_b = b;
                                        max_score = m->score;
                                }
                        }
                }
                a = best_a;
                b = best_b;
                /* Record the alignment task: children active[a], active[b],
                   result node numseq+i. */
                t->list[i]->a = active[a];
                t->list[i]->b = active[b];
                t->list[i]->c = numseq+i;

                /* The new node takes slot a; slot b is retired and pushed to
                   the tail by the descending re-sort over the shrinking list. */
                active[a] = numseq+i;
                active[b] = -1;
                qsort(active, numseq-i, sizeof(int), int_cmp);

                do_align(msa,t,m,i);
        }

        MFREE(active);
        MFREE(samples);
        free_aln_mem(m);
        return OK;
ERROR:
        /* BUGFIX: the error path previously leaked samples and active. */
        if(active){
                MFREE(active);
        }
        if(samples){
                MFREE(samples);
        }
        if(m){
                free_aln_mem(m);
        }
        return FAIL;
}
/* qsort comparator ordering ints in DESCENDING order (largest first). */
int int_cmp(const void *a, const void *b)
{
        const int lhs = *(const int *) a;
        const int rhs = *(const int *) b;
        return rhs - lhs;
}
/* Draw n distinct indices from [0, N) uniformly at random, written to
 * samples[] in increasing order.  This is Knuth's selection-sampling
 * technique (Algorithm S): each index t is selected with probability
 * (remaining picks)/(remaining items).  Always returns OK. */
int SampleWithoutReplacement(struct rng_state* rng, int N, int n,int* samples)
{
        int seen = 0;    /* indices examined so far */
        int picked = 0;  /* indices selected so far */
        double u;

        while (picked < n)
        {
                u = tl_random_double(rng);
                /* Select index `seen` with probability (n-picked)/(N-seen). */
                if ((N - seen)*u < n - picked){
                        samples[picked] = seen;
                        picked++;
                }
                seen++;
        }
        return OK;
}
/* Score (without producing a path) the alignment of inputs a and b, which
 * are sequence indices when < numseq and profile indices otherwise.  The
 * raw score from aln_runner() is normalized by the number of sequences in
 * the participating profile(s); the result is written to *score.
 *
 * Returns OK on success, FAIL on allocation error.
 */
int score_aln(struct aln_mem* m,float** profile, struct msa* msa, int a,int b,int numseq,float* score)
{
        int g;
        int len_a;
        int len_b;

        /* Input length: raw sequence length for leaves, profile length otherwise. */
        if(a < numseq){
                len_a = msa->sequences[a]->len;
        }else{
                len_a = msa->plen[a];
        }
        if(b < numseq){
                len_b = msa->sequences[b]->len;
        }else{
                len_b = msa->plen[b];
        }

        m->mode = ALN_MODE_SCORE_ONLY;

        g = (len_a > len_b)? len_a:len_b;
        RUN(resize_aln_mem(m, g));

        /* NOTE(review): these use `> numseq` while every other branch in this
           file (and do_align) splits on `< numseq` / else, i.e. profiles have
           index >= numseq.  `a > numseq` skips the case a == numseq — confirm
           whether `>=` was intended. */
        if (a > numseq){
                RUN(set_gap_penalties_n(profile[a],len_a,msa->nsip[b]));
        }
        if (b > numseq){
                RUN(set_gap_penalties_n(profile[b],len_b,msa->nsip[a]));
        }

        init_alnmem(m, len_a, len_b);
        /* Same four-case dispatch as do_align: seq/seq, seq/profile,
           profile/seq, profile/profile; inputs are swapped where needed so
           the profile (or shorter profile) is input 1.  No path is produced
           (map argument is NULL). */
        if(a < numseq){
                if(b < numseq){
                        m->seq1 = msa->sequences[a]->s;
                        m->seq2 = msa->sequences[b]->s;
                        m->prof1 = NULL;
                        m->prof2 = NULL;
                        aln_runner(m, NULL);
                }else{
                        m->enda = len_b;
                        m->endb = len_a;
                        m->len_a = len_b;
                        m->len_b = len_a;
                        m->seq1 = NULL;
                        m->seq2 = msa->sequences[a]->s;
                        m->prof1 = profile[b];
                        m->prof2 = NULL;
                        m->sip = msa->nsip[b];
                        aln_runner(m, NULL);
                        /* Normalize by the profile's sequence count. */
                        m->score = m->score / msa->nsip[b];
                }
        }else{
                if(b < numseq){
                        m->seq1 = NULL;
                        m->seq2 = msa->sequences[b]->s;
                        m->prof1 = profile[a];
                        m->prof2 = NULL;
                        m->sip = msa->nsip[a];
                        aln_runner(m, NULL);
                        m->score = m->score / msa->nsip[a];
                }else{
                        if(len_a < len_b){
                                m->seq1 = NULL;
                                m->seq2 = NULL;
                                m->prof1 = profile[a];
                                m->prof2 = profile[b];
                                aln_runner(m, NULL);
                        }else{
                                m->enda = len_b;
                                m->endb = len_a;
                                m->len_a = len_b;
                                m->len_b = len_a;
                                m->seq1 = NULL;
                                m->seq2 = NULL;
                                m->prof1 = profile[b];
                                m->prof2 = profile[a];
                                aln_runner(m, NULL);
                        }
                        /* Normalize by the product of both sequence counts. */
                        m->score = m->score / (msa->nsip[a] * msa->nsip[b]);
                }
        }
        *score = m->score;
        return OK;
ERROR:
        return FAIL;
}
|
GB_binop__pair_bool.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__pair_bool
// A.*B function (eWiseMult): GB_AemultB__pair_bool
// A*D function (colscale): GB_AxD__pair_bool
// D*A function (rowscale): GB_DxB__pair_bool
// C+=B function (dense accum): GB_Cdense_accumB__pair_bool
// C+=b function (dense accum): GB_Cdense_accumb__pair_bool
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pair_bool
// C=scalar+B (none)
// C=scalar+B' (none)
// C=A+scalar (none)
// C=A'+scalar (none)
// C type: bool
// A type: bool
// B,b type: bool
// BinaryOp: cij = 1
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = 1 ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PAIR || GxB_NO_BOOL || GxB_NO_PAIR_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Dense eWiseAdd kernel for the PAIR operator on bool.  GB_BINOP sets every
// output entry to 1, so the included template only walks the structure.
// Returns GrB_NO_VALUE when this operator/type combination is compiled out
// via GB_DISABLE (file is auto-generated; do not edit).
GrB_Info GB_Cdense_ewise3_noaccum__pair_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Dense subassign C += B for the PAIR operator on bool.  The kfirst/klast/
// pstart slice arrays describe how B's entries are partitioned across the
// ntasks parallel tasks (see GB_ek_slice.h).  Returns GrB_NO_VALUE when the
// kernel is compiled out via GB_DISABLE.
GrB_Info GB_Cdense_accumB__pair_bool
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// Dense subassign C += b (scalar broadcast) for the PAIR operator on bool.
// Returns GrB_NO_VALUE when the kernel is compiled out via GB_DISABLE.
GrB_Info GB_Cdense_accumb__pair_bool
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type bool
        bool bwork = (*((bool *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable — the block above always returns first.  Kept as-is
    // because this file is auto-generated.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// Column-scale C = A*D for the PAIR operator on bool; the slice arrays
// partition A's entries across ntasks tasks (see GB_ek_slice.h).  Returns
// GrB_NO_VALUE when the kernel is compiled out via GB_DISABLE.
GrB_Info GB_AxD__pair_bool
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Row-scale C = D*B for the PAIR operator on bool.  Returns GrB_NO_VALUE
// when the kernel is compiled out via GB_DISABLE.
GrB_Info GB_DxB__pair_bool
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Sparse eWiseAdd (set union of patterns) for the PAIR operator on bool,
// with optional mask M.  C_to_M/A/B map C's vectors to those of M, A and B;
// TaskList describes the parallel decomposition.  Returns GrB_NO_VALUE when
// the kernel is compiled out via GB_DISABLE.
GrB_Info GB_AaddB__pair_bool
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

// Sparse eWiseMult (set intersection of patterns) for the PAIR operator on
// bool, with optional mask M.  Returns GrB_NO_VALUE when the kernel is
// compiled out via GB_DISABLE.
GrB_Info GB_AemultB__pair_bool
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
bool x = (*((bool *) x_input)) ;
bool *Bx = (bool *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
bool *Ax = (bool *) Ax_input ;
bool y = (*((bool *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info (none)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 * Returns 1 if the difference is negative, otherwise 0.
 * NOTE: *y is normalized in place as part of the computation (classic
 * glibc-manual idiom); callers must not rely on y afterwards. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y so that y->tv_usec <= x->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry excess microseconds into seconds so the usec gap stays < 1s. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* tv_usec is now certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 24;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
binarytrees.gcc-3.c | // The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// Contributed by Jeremy Zerfas
// Based on the C++ program from Jon Harrop, Alex Mizrahi, and Bruno Coutinho.
// *reset*
// This controls the width of lines that are output by this program.
#define MAXIMUM_LINE_WIDTH 60
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
typedef off_t off64_t; // This is needed to keep APR happy on 32 bit systems.
#include <apr_pools.h>
// intptr_t should be the native integer type on most sane systems.
typedef intptr_t intnative_t;
typedef struct tree_node{
struct tree_node * left_Node, * right_Node;
} tree_node;
// Create a binary tree of depth tree_Depth in memory_Pool, set the root node's
// value to root_Node_Value, and finally return a pointer to the created binary
// tree.
// Create a binary tree of depth tree_Depth, allocating all nodes from
// memory_Pool, and return its root.  Depth 0 yields a single leaf.
static inline tree_node * create_Tree(const intnative_t tree_Depth,
  apr_pool_t * const memory_Pool){
    tree_node * const node = apr_palloc(memory_Pool, sizeof(tree_node));

    // Internal node: build both subtrees one level shallower.
    // Leaf: both children are NULL.
    if (tree_Depth > 0) {
        node->left_Node  = create_Tree(tree_Depth - 1, memory_Pool);
        node->right_Node = create_Tree(tree_Depth - 1, memory_Pool);
    } else {
        node->left_Node  = NULL;
        node->right_Node = NULL;
    }

    return node;
}
// Compute and return the checksum for the binary tree that has root_Node as the
// root node.
// Return the checksum of the tree rooted at root_Node: each node (leaf or
// internal) contributes 1, i.e. the checksum equals the node count.
static inline intnative_t compute_Tree_Checksum(
  const tree_node * const root_Node){
    // Leaves (no left child implies no right child, by construction).
    if (!root_Node->left_Node)
        return 1;

    return 1 + compute_Tree_Checksum(root_Node->left_Node)
             + compute_Tree_Checksum(root_Node->right_Node);
}
/* binary-trees benchmark driver: builds a stretch tree, a long-lived tree,
 * and many short-lived trees (in parallel), printing a checksum for each. */
int main(int argc, char ** argv){
    // Set minimum_Tree_Depth to 4 and maximum_Tree_Depth to the maximum of
    // what was specified as the argument to the program and
    // minimum_Tree_Depth+2.
    const intnative_t minimum_Tree_Depth=4;
    // BUGFIX: reading argv[1] without checking argc was undefined behavior
    // when the program was run with no arguments; default to the minimum.
    intnative_t maximum_Tree_Depth = (argc > 1) ? atoi(argv[1]) : 0;
    if(maximum_Tree_Depth < minimum_Tree_Depth+2)
        maximum_Tree_Depth=minimum_Tree_Depth+2;

    apr_initialize();
    apr_pool_t * memory_Pool;

    // Create a memory pool, create a binary tree of depth
    // maximum_Tree_Depth+1, compute the checksum of the binary tree, print
    // the statistics, and then delete the memory pool.
    apr_pool_create_unmanaged(&memory_Pool);
    tree_node * stretch_Tree=create_Tree(maximum_Tree_Depth+1, memory_Pool);
    printf("stretch tree of depth %jd\t check: %jd\n",
      (intmax_t)maximum_Tree_Depth+1,
      (intmax_t)compute_Tree_Checksum(stretch_Tree));
    apr_pool_destroy(memory_Pool);

    // Create a memory pool and then create a long-lived binary tree of depth
    // maximum_Tree_Depth which will be left alone for a while while more
    // binary trees get allocated and deallocated as required by the rules.
    // We'll finish working with this later.
    apr_pool_create_unmanaged(&memory_Pool);
    tree_node * long_Lived_Tree=create_Tree(maximum_Tree_Depth, memory_Pool);

    // Create a lot of binary trees in parallel of depths ranging from
    // minimum_Tree_Depth to maximum_Tree_Depth, compute and tally up all
    // their checksums, destroy the trees, and then record the statistics to
    // output_Buffer[] so they can be displayed in order later.
    char output_Buffer[maximum_Tree_Depth+1][MAXIMUM_LINE_WIDTH+1];
    intnative_t current_Tree_Depth;
    #pragma omp parallel for
    for(current_Tree_Depth=minimum_Tree_Depth;
      current_Tree_Depth<=maximum_Tree_Depth; current_Tree_Depth+=2){

        intnative_t iterations=1<<(maximum_Tree_Depth-current_Tree_Depth+
          minimum_Tree_Depth);

        // Create a memory pool for this thread to use.
        apr_pool_t * thread_Memory_Pool;
        apr_pool_create_unmanaged(&thread_Memory_Pool);

        intnative_t i=1, total_Trees_Checksum=0;
        for(; i<=iterations; ++i){
            // Create a binary tree of depth current_Tree_Depth, tally its
            // checksum, and recycle the pool for the next tree.
            tree_node * const tree_1=create_Tree(current_Tree_Depth,
              thread_Memory_Pool);
            total_Trees_Checksum+=compute_Tree_Checksum(tree_1);
            apr_pool_clear(thread_Memory_Pool);
        }

        apr_pool_destroy(thread_Memory_Pool);

        // Record the statistics for the trees of depth current_Tree_Depth.
        sprintf(output_Buffer[current_Tree_Depth],
          "%jd\t trees of depth %jd\t check: %jd\n", (intmax_t)iterations,
          (intmax_t)current_Tree_Depth, (intmax_t)total_Trees_Checksum);
    }

    // Print the statistics for all of the various tree depths.
    for(current_Tree_Depth=minimum_Tree_Depth;
      current_Tree_Depth<=maximum_Tree_Depth; current_Tree_Depth+=2)
        printf("%s", output_Buffer[current_Tree_Depth]);

    // Compute the checksum of the long-lived binary tree that we created
    // earlier, print the statistics, and then delete the memory pool.
    printf("long lived tree of depth %jd\t check: %jd\n",
      (intmax_t)maximum_Tree_Depth,
      (intmax_t)compute_Tree_Checksum(long_Lived_Tree));
    apr_pool_destroy(memory_Pool);

    apr_terminate();
    return 0;
}
|
GI.h | #include <parse.h>
#include <unordered_set>
#define SELF_GRAVITY
#define FLAG_GI
#ifdef PARTICLE_SIMULATOR_TWO_DIMENSION
#error
#endif
// Return a set of `number_of_removed_items` distinct random indices drawn
// uniformly from [lowest_index, highest_index).
//
// BUGFIX: the original divided by zero (`rand() % 0`, UB) whenever
// highest_index <= lowest_index, and spun forever when more unique values
// were requested than the range contains.  Both cases are now handled:
// an empty range yields an empty set, and the request is clamped to the
// range size.
std::unordered_set <unsigned int> create_removal_list (const unsigned int lowest_index, const unsigned int highest_index,const unsigned int number_of_removed_items){
    std::unordered_set<unsigned int> removal_list;
    if (number_of_removed_items == 0 || highest_index <= lowest_index)
        return removal_list;

    const unsigned int range = highest_index - lowest_index;
    // Cannot draw more unique values than the range contains.
    const unsigned int target =
        (number_of_removed_items < range) ? number_of_removed_items : range;

    while (removal_list.size() < target){
        const unsigned int num = rand () % range + lowest_index;
        // Inserting a duplicate is harmless: the unordered_set filters it out.
        removal_list.insert(num);
    }
    return removal_list;
}
template <class Ptcl> class GI : public Problem<Ptcl>{
public:
static double end_time;
static double damping;
static void setupIC(PS::ParticleSystem<Ptcl>& sph_system, system_t& sysinfo, PS::DomainInfo& dinfo,
ParameterFile ¶meter_file){
const double Corr = .98;//Correction Term
/////////
//place ptcls
/////////
std::vector<Ptcl> ptcl;
std::vector<Ptcl> tar;//Target
std::vector<Ptcl> imp;//Impactor
/////////
// Use parameters from input file, or defaults if none provided
PS::F64 UnitMass = parameter_file.getValueOf("UnitMass", 6.0e+24);
PS::F64 UnitRadi = parameter_file.getValueOf("UnitRadi", 6400e+3);
PS::F64 coreFracRadi = parameter_file.getValueOf("coreFracRadi", 3500.0e+3 / 6400.0e+3);
PS::F64 coreFracMass = parameter_file.getValueOf("coreFracMass", 0.3);
PS::F64 imptarMassRatio = parameter_file.getValueOf("imptarMassRatio", 0.1);
const unsigned int mode = parameter_file.getValueOf("mode", 2 );
PS::F64 impVel = parameter_file.getValueOf("impVel",0.);
PS::F64 impAngle = parameter_file.getValueOf("impact_angle",0.) /180.0 * math::pi; //converting from degree to radian
end_time = parameter_file.getValueOf("end_time",1.0e+4);
damping = parameter_file.getValueOf("damping",1.);
PS::F64 Nptcl = parameter_file.getValueOf("total_number_of_particles", 100000);
const PS::F64 Expand = 1.1;
const PS::F64 tarMass = UnitMass;
const PS::F64 tarRadi = UnitRadi;
const PS::F64 tarCoreMass = tarMass * coreFracMass;
const PS::F64 tarCoreRadi = tarRadi * coreFracRadi;
const PS::F64 impMass = imptarMassRatio * tarMass;
const PS::F64 impRadi = Expand * cbrt(impMass / tarMass) * UnitRadi;
const PS::F64 impCoreMass = impMass * coreFracMass;
const PS::F64 impCoreRadi = impRadi * coreFracRadi;
const double offset = 5.0 * UnitRadi;
/* the following line predicts the number of grid points in one direction
The volume of a recutangular box whose radius is L^3
The volume of a sphere whose radius is L/2 is 4 \pi/3 (L/2)^3
dx is defined as the grid size in one direction
Nptcl = (volume of a sphere)/(dx)^3 * L^3, where L = 2.0
dx = (4.0/3.0 * math::pi/Nptcl)^{1/3}
The number of grid point is 2.0/dx
we multiply by 1.1 so that enough particles are created to generate a sphere */
const int gridpoint = int(2.0/pow(4.0/3.0 * math::pi * 1.1/Nptcl,0.333));
const PS::F64 dx = 2.0/gridpoint;
const PS::F64 Grav = 6.67e-11;
//target
int tarNptcl = 0;
int tarNmntl = 0;
int tarNcore = 0;
//impactor
double tarCoreShrinkFactor = 1.0;
int impNmntl = 0;
int impNcore = 0;
int impNptcl = 0;
const int NptclIn1Node = Nptcl / PS::Comm::getNumberOfProc();
PS::S32 id = 0;
switch (mode){
case 1:
// This mode will enable to create a target and an imapctor from input/tar.dat and input/imp.dat
{
std::cout << "creating target from tar.dat" << std::endl;
FILE * tarFile;
tarFile = fopen("input/tar.dat","r");
FileHeader tarheader;
int nptcltar;
nptcltar = tarheader.readAscii(tarFile);
std::cout << "num tar ptcl: " << nptcltar << std::endl;
for(int i=0; i<nptcltar; i++){
Ptcl ith;
ith.readAscii(tarFile);
if(ith.id / NptclIn1Node == PS::Comm::getRank()) tar.push_back(ith);
}
for(PS::U32 i = 0 ; i < tar.size() ; ++ i){
ptcl.push_back(tar[i]);
}
for(PS::U32 i = 0 ; i < tar.size() ; ++ i){
if (tar[i].tag==0){
tarNmntl += 1;
}else{
tarNcore += 1;
}
}
tarNptcl = tarNmntl + tarNcore;
std::cout << "creating impactor from imp.dat" << std::endl;
FILE * impFile;
impFile = fopen("input/imp.dat","r");
FileHeader impheader;
int nptclimp;
nptclimp = impheader.readAscii(impFile);
std::cout << "num imp ptcl: " << nptclimp << std::endl;
for(int i=0; i<nptclimp; i++){
Ptcl ith;
ith.readAscii(impFile);
ith.vel.x += (-1) * cos(impAngle) * impVel;
ith.vel.y += (-1) * sin(impAngle) * impVel;
ith.pos.x += (impRadi + tarRadi) * cos(impAngle);
ith.pos.y += (impRadi + tarRadi) * sin(impAngle);
if(ith.id / NptclIn1Node == PS::Comm::getRank()) imp.push_back(ith);
}
for(PS::U32 i = 0 ; i < imp.size() ; ++ i){
ptcl.push_back(imp[i]);
if (imp[i].tag==0){
impNmntl += 1;
}else{
impNcore += 1;
}
}
impNptcl = impNmntl + impNcore;
Nptcl = tarNptcl + impNptcl;
break;
}
case 2:
// This mode will create an initial condition
{
///////////////////
//Dummy put to determine # of ptcls
///////////////////
for(PS::F64 x = -1.0 ; x <= 1.0 ; x += dx){
for(PS::F64 y = -1.0 ; y <= 1.0 ; y += dx){
for(PS::F64 z = -1.0 ; z <= 1.0 ; z += dx){
const PS::F64 r = sqrt(x*x + y*y + z*z) * UnitRadi;
if(r >= tarRadi || r <= tarCoreRadi) continue;
++ tarNmntl;
}
}
}
while(tarCoreShrinkFactor *= 0.99){
tarNcore = 0;
for(PS::F64 x = -1.0 ; x <= 1.0 ; x += dx){
for(PS::F64 y = -1.0 ; y <= 1.0 ; y += dx){
for(PS::F64 z = -1.0 ; z <= 1.0 ; z += dx){
const PS::F64 r = tarCoreShrinkFactor * sqrt(x*x + y*y + z*z) * UnitRadi;
if(r >= Corr * tarCoreRadi) continue;
++ tarNcore;
}
}
}
if((double)(tarNcore) / (double)(tarNcore + tarNmntl) > coreFracMass) break;
}
///////////////////
//Dummy end
///////////////////
// checking if there are enough mantle particles
if (tarNmntl < static_cast<int>(Nptcl * (1.0-coreFracMass))){
std::cout << "Too few mantle particles. Increase the grid size. The easiest fix is to increase the gridpoint in GI.h " << std::endl;
exit(0);
}
// checking if there are enough core particles
if (tarNcore < static_cast<int>(Nptcl * coreFracMass)){
std::cout << "Too few core particles. Increase the grid size and/or change the core shrink factor." << std::endl;
exit(0);
}
//removing particles to reach the exact Nptcl
int index = 0;
std::unordered_set<unsigned int> removal_list;
removal_list = create_removal_list (0, tarNmntl, tarNmntl - static_cast<int>(Nptcl * (1.0-coreFracMass)));
std::cout << "creating target" << std::endl;
for(PS::F64 x = -1.0 ; x <= 1.0 ; x += dx){
for(PS::F64 y = -1.0 ; y <= 1.0 ; y += dx){
for(PS::F64 z = -1.0 ; z <= 1.0 ; z += dx){
const PS::F64 r = sqrt(x*x + y*y + z*z) * UnitRadi;
if(r >= tarRadi || r <= tarCoreRadi) continue;
Ptcl ith;
ith.pos.x = UnitRadi * x;
ith.pos.y = UnitRadi * y;
ith.pos.z = UnitRadi * z;
ith.dens = (tarMass - tarCoreMass) / (4.0 / 3.0 * math::pi * (tarRadi * tarRadi * tarRadi - tarCoreRadi * tarCoreRadi * tarCoreRadi));
ith.mass = tarMass + impMass;
ith.eng = 0.1 * Grav * tarMass / tarRadi;
ith.id = id++;
ith.setPressure(&AGranite);
ith.tag = 0;
if (removal_list.count(index)){
id += -1;
}else if (ith.id / NptclIn1Node == PS::Comm::getRank()){
tar.push_back(ith);
}
index += 1;
}
}
}
std::cout << "# of mantle particles = " << id << std::endl;
// making the core condition
removal_list.clear();
removal_list = create_removal_list (tarNmntl, tarNmntl + tarNcore, tarNcore - static_cast<int>(Nptcl * coreFracMass));
index = tarNmntl;
for(PS::F64 x = -1.0 ; x <= 1.0 ; x += dx){
for(PS::F64 y = -1.0 ; y <= 1.0 ; y += dx){
for(PS::F64 z = -1.0 ; z <= 1.0 ; z += dx){
const PS::F64 r = tarCoreShrinkFactor * sqrt(x*x + y*y + z*z) * UnitRadi;
if(r >= Corr * tarCoreRadi) continue;
Ptcl ith;
ith.pos.x = tarCoreShrinkFactor * UnitRadi * x;
ith.pos.y = tarCoreShrinkFactor * UnitRadi * y;
ith.pos.z = tarCoreShrinkFactor * UnitRadi * z;
ith.dens = tarCoreMass / (4.0 / 3.0 * math::pi * tarCoreRadi * tarCoreRadi * tarCoreRadi * Corr * Corr * Corr);
ith.mass = tarMass + impMass;
ith.eng = 0.1 * Grav * tarMass / tarRadi;
ith.id = id++;
ith.setPressure(&Iron);
ith.tag = 1;
if (removal_list.count(index)){
id += -1;
}else{
if(ith.id / NptclIn1Node == PS::Comm::getRank()) tar.push_back(ith);
}
index += 1;
}
}
}
std::cout << "# of total particles = " << id << std::endl;
tarNmntl = static_cast<int>(Nptcl * (1.0 - coreFracMass));
tarNcore = static_cast<int>(Nptcl * coreFracMass);
for(PS::U32 i = 0 ; i < tar.size() ; ++ i){
tar[i].mass /= (PS::F64)(Nptcl);
}
for(PS::U32 i = 0 ; i < tar.size() ; ++ i){
ptcl.push_back(tar[i]);
}
break;
}
}
tarNptcl = tarNcore + tarNmntl;
impNptcl = impNcore + impNmntl;
std::cout << "Target :" << tarNptcl << std::endl;
std::cout << " radius : " << tarRadi << std::endl;
std::cout << " total-to-core : " << (double)(tarNcore) / (double)(tarNptcl) << std::endl;
std::cout << " # of core ptcls : " << tarNcore << std::endl;
std::cout << " # of mantle ptcls: " << tarNmntl << std::endl;
std::cout << " core density : " << tarCoreMass / (4.0 * math::pi / 3.0 * tarCoreRadi * tarCoreRadi * tarCoreRadi * Corr * Corr * Corr) << std::endl;
std::cout << " mantle density : " << (tarMass - tarCoreMass) / (4.0 * math::pi / 3.0 * (tarRadi * tarRadi * tarRadi - tarCoreRadi * tarCoreRadi * tarCoreRadi)) << std::endl;
std::cout << " mean density : " << tarMass / (4.0 * math::pi / 3.0 * tarRadi * tarRadi * tarRadi) << std::endl;
if (mode==1){
std::cout << "Impactor:" << impNptcl << std::endl;
std::cout << " radius : " << impRadi << std::endl;
std::cout << " total-to-core : " << (double)(impNcore) / (double)(impNptcl) << std::endl;
std::cout << " # of core ptcls : " << impNcore << std::endl;
std::cout << " # of mantle ptcls: " << impNmntl << std::endl;
std::cout << " core density : " << impCoreMass / (4.0 * math::pi / 3.0 * impCoreRadi * impCoreRadi * impCoreRadi * Corr * Corr * Corr) << std::endl;
std::cout << " mantle density : " << (impMass - impCoreMass) / (4.0 * math::pi / 3.0 * (impRadi * impRadi * impRadi - impCoreRadi * impCoreRadi * impCoreRadi)) << std::endl;
std::cout << " mean density : " << impMass / (4.0 * math::pi / 3.0 * impRadi * impRadi * impRadi) << std::endl;
std::cout << " velocity : " << impVel << std::endl;
std::cout << "Tar-to-Imp mass ratio: " << (double)(impNmntl) / (double)(tarNmntl) << std::endl;
}
assert(Nptcl == tarNptcl + impNptcl);
std::cout << "Total number of particles:" << Nptcl << std::endl;
const PS::S32 numPtclLocal = ptcl.size();
sph_system.setNumberOfParticleLocal(numPtclLocal);
for(PS::U32 i = 0 ; i < ptcl.size() ; ++ i){
sph_system[i] = ptcl[i];
}
//Fin.
std::cout << "# of ptcls = " << ptcl.size() << std::endl;
std::cout << "setup..." << std::endl;
}
static void setEoS(PS::ParticleSystem<Ptcl>& sph_system){
    // Assign an equation of state to every local particle based on its
    // material tag: even tags get granite, odd tags get iron.
    const PS::U64 n_local = sph_system.getNumberOfParticleLocal();
    for(PS::U64 idx = 0 ; idx < n_local ; ++ idx){
        // TODO: Modify the lines below for all particles that need new EoS
        if(sph_system[idx].tag % 2 != 0){
            sph_system[idx].setPressure(&Iron);
        }else{
            sph_system[idx].setPressure(&AGranite);
        }
    }
}
static void addExternalForce(PS::ParticleSystem<Ptcl>& sph_system, system_t& sysinfo){
    // Artificial velocity damping applied only during the early phase
    // (t < 5000); afterwards no external force is added.
    if(sysinfo.time >= 5000) return;
    std::cout << "Add Ext. Force!!!" << std::endl;
    #pragma omp parallel for
    for(PS::S32 ip = 0 ; ip < sph_system.getNumberOfParticleLocal() ; ++ ip){
        // Deceleration proportional to velocity: a += -0.05 * v / dt.
        sph_system[ip].acc += - sph_system[ip].vel * 0.05 / sph_system[ip].dt;
    }
}
};
|
mask_image_from_size.h | // Copyright 2017 Joan Puigcerver
#ifndef NNUTILS_CPU_MASK_IMAGE_FROM_SIZE_H_
#define NNUTILS_CPU_MASK_IMAGE_FROM_SIZE_H_
#include <cassert>
#ifdef __cplusplus
namespace nnutils {
namespace cpu {
template <typename T, typename Int>
// Set im[n, c, y, x] = mask for every pixel outside the valid region of
// each image in the NCHW batch. sizes[2n] is the valid height and
// sizes[2n+1] the valid width of image n.
// Fix: the two inner loop indices were declared `int` while the extents
// H and W have type `Int`; with a 64-bit Int and large images that could
// truncate/overflow. All four indices now use `Int`.
void mask_image_from_size(const Int N, const Int C, const Int H, const Int W,
                          const Int* sizes, T* im, const T mask = 0) {
  assert(N > 0 && C > 0 && H > 0 && W > 0);
  assert(sizes != nullptr);
  assert(im != nullptr);
  // TODO(joapuipe): Depending on the number of elements to mask, it may be
  // more efficient to parallelize only across N and C, and mask only the
  // in pixels y >= im_h or x >= im_w.
  // NOTE: the loops must stay perfectly nested for collapse(4), so the
  // sizes[] loads cannot be hoisted out of the innermost body.
  #pragma omp parallel for collapse(4)
  for (Int n = 0; n < N; ++n) {
    for (Int c = 0; c < C; ++c) {
      for (Int y = 0; y < H; ++y) {
        for (Int x = 0; x < W; ++x) {
          const Int im_h = sizes[2 * n];
          const Int im_w = sizes[2 * n + 1];
          if (y >= im_h || x >= im_w) {
            im[n * C * H * W + c * H * W + y * W + x] = mask;
          }
        }
      }
    }
  }
}
} // namespace cpu
} // namespace nnutils
#endif // __cplusplus
#endif // NNUTILS_CPU_MASK_IMAGE_FROM_SIZE_H_
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Compute *result = *x - *y for two `struct timeval` values.
 *
 * As in the classic GNU libc example, *y is used as scratch space and is
 * modified during normalisation. Returns 1 when the difference is
 * negative (x earlier than y), otherwise 0; result->tv_usec always ends
 * up in [0, 1000000).
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
	/* Borrow whole seconds into y so that x->tv_usec >= y->tv_usec. */
	if (x->tv_usec < y->tv_usec) {
		int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
		y->tv_usec -= 1000000 * borrow;
		y->tv_sec += borrow;
	}
	/* Carry surplus microseconds (over one second) into y's seconds. */
	if (x->tv_usec - y->tv_usec > 1000000) {
		int carry = (x->tv_usec - y->tv_usec) / 1000000;
		y->tv_usec += 1000000 * carry;
		y->tv_sec -= carry;
	}
	/* After normalisation, the per-field subtraction cannot make
	 * tv_usec negative. */
	result->tv_sec = x->tv_sec - y->tv_sec;
	result->tv_usec = x->tv_usec - y->tv_usec;
	return x->tv_sec < y->tv_sec;
}
/* Driver: allocate the grids, run the order-8 25-point stencil TESTS
 * times, and report per-test and minimum wall-clock time. */
int main(int argc, char *argv[])
{
	int t, i, j, k, test;
	/* Default problem size/steps. Previously Nx..Nt were read
	 * uninitialized (undefined behavior) when too few arguments were
	 * given. */
	int Nx = 108, Ny = 108, Nz = 108, Nt = 10;
	if (argc > 3) {
		Nx = atoi(argv[1])+8; /* +8: 4-cell halo on each side */
		Ny = atoi(argv[2])+8;
		Nz = atoi(argv[3])+8;
	}
	if (argc > 4)
		Nt = atoi(argv[4]);

	/* Allocate the two time levels and the coefficient grid. The
	 * original code malloc'd roc2 twice, leaking the first (wrongly
	 * sized) allocation. */
	double ****A = (double ****) malloc(sizeof(double***)*2);
	double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
	A[0] = (double ***) malloc(sizeof(double**)*Nz);
	A[1] = (double ***) malloc(sizeof(double**)*Nz);
	for(i=0; i<Nz; i++){
		A[0][i] = (double**) malloc(sizeof(double*)*Ny);
		A[1][i] = (double**) malloc(sizeof(double*)*Ny);
		roc2[i] = (double**) malloc(sizeof(double*)*Ny);
		for(j=0;j<Ny;j++){
			A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
			A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
			roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
		}
	}

	// tile size information, including extra element to decide the list length
	int *tile_size = (int*) malloc(sizeof(int));
	tile_size[0] = -1;
	// The list is modified here before source-to-source transformations
	tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
	tile_size[0] = 4;
	tile_size[1] = 4;
	tile_size[2] = 32;
	tile_size[3] = 512;
	tile_size[4] = -1;

	// for timekeeping
	int ts_return = -1;
	struct timeval start, end, result;
	double tdiff = 0.0, min_tdiff=1.e100;

	/* Initialize every cell, including the halo (indices 0..3) and the
	 * second time level: the stencil reads both, but the original loops
	 * started at 1 and never touched A[1], so indeterminate values were
	 * read (undefined behavior). */
	const int BASE = 1024;
	srand(42);
	for (i = 0; i < Nz; i++) {
		for (j = 0; j < Ny; j++) {
			for (k = 0; k < Nx; k++) {
				A[0][i][j][k] = 1.0 * (rand() % BASE);
				A[1][i][j][k] = 0.0;
				roc2[i][j][k] = 2.0 * (rand() % BASE);
			}
		}
	}

#ifdef LIKWID_PERFMON
	LIKWID_MARKER_INIT;
#pragma omp parallel
	{
		LIKWID_MARKER_THREADINIT;
#pragma omp barrier
		LIKWID_MARKER_START("calc");
	}
#endif

	int num_threads = 1;
#if defined(_OPENMP)
	num_threads = omp_get_max_threads();
#endif

	/* Central-difference coefficients of the order-8 spatial stencil. */
	const double coef0 = -0.28472;
	const double coef1 = 0.16000;
	const double coef2 = -0.02000;
	const double coef3 = 0.00254;
	const double coef4 = -0.00018;

	for(test=0; test<TESTS; test++){
		gettimeofday(&start, 0);
		// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
		for (t = 0; t < Nt; t++) {
			for (i = 4; i < Nz-4; i++) {
				for (j = 4; j < Ny-4; j++) {
					for (k = 4; k < Nx-4; k++) {
						A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
							coef0* A[t%2][i ][j ][k ] +
							coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] +
							       A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] +
							       A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) +
							coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] +
							       A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] +
							       A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) +
							coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] +
							       A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] +
							       A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) +
							coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] +
							       A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] +
							       A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) );
					}
				}
			}
		}
#pragma endscop
		gettimeofday(&end, 0);
		ts_return = timeval_subtract(&result, &end, &start);
		tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
		min_tdiff = MIN(min_tdiff, tdiff);
		printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
	}
	PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
	{
		LIKWID_MARKER_STOP("calc");
	}
	LIKWID_MARKER_CLOSE;
#endif

	/* Free allocated arrays, including the top-level pointer arrays and
	 * the tile-size list (both previously leaked). */
	for(i=0; i<Nz; i++){
		for(j=0;j<Ny;j++){
			free(A[0][i][j]);
			free(A[1][i][j]);
			free(roc2[i][j]);
		}
		free(A[0][i]);
		free(A[1][i]);
		free(roc2[i]);
	}
	free(A[0]);
	free(A[1]);
	free(A);
	free(roc2);
	free(tile_size);
	return 0;
}
|
DRB097-target-teams-distribute-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#define min(x, y) (((x) < (y)) ? (x) : (y))
/*
use of omp target + teams + distribute + parallel for
*/
#include <omp.h>
// DataRaceBench DRB097: dot product computed with nested parallel-for
// reductions (a blocked outer loop over 256-element chunks plus an inner
// per-chunk loop), then checked against a plain parallel reduction.
// NOTE(review): this file is a data-race benchmark; the exact pragma and
// loop structure is the artifact under test and must not be restructured.
int main(int argc,char *argv[])
{
int i;
int i2;
int len = 2560;
double sum = 0.0;// result of the blocked reduction
double sum2 = 0.0;// reference result
double a[len];
double b[len];
/*Initialize with some values*/
#pragma omp parallel for private (i)
for (i = 0; i <= len - 1; i += 1) {
a[i] = ((double )i) / 2.0;
b[i] = ((double )i) / 3.0;
}
// Both loop levels accumulate into `sum` through reduction clauses, so
// there is no race on sum (hence "-no" in the benchmark name).
#pragma omp parallel for private (i,i2) reduction (+:sum)
for (i2 = 0; i2 <= len - 1; i2 += 256) {
#pragma omp parallel for private (i) reduction (+:sum)
for (i = i2; i <= ((i2 + 256 < len?i2 + 256 : len)) - 1; i += 1) {
sum += a[i] * b[i];
}
}
/* CPU reference computation */
#pragma omp parallel for private (i) reduction (+:sum2) firstprivate (len)
for (i = 0; i <= len - 1; i += 1) {
sum2 += a[i] * b[i];
}
printf("sum=%f sum2=%f\n",sum,sum2);
return 0;
}
|
kClistDens.c | /*
Info:
Feel free to use these lines as you wish.
This program iterates over all k-cliques. It finds a k(1+eps) approximation of the densest k-clique subgraph. It is highly scalable.
To compile:
"gcc kClistDens.c -O9 -o kClistDens -fopenmp".
To execute:
"./kClistDens p k eps edgelist.txt".
"edgelist.txt" should contain the graph: one edge on each line separated by a space.
k is the size of the k-cliques
p is the number of threads
eps is the precision
Will print the number of k-cliques and the density of the found kclique densest.
*/
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include <omp.h>
#define NLINKS 100000000 //maximum number of edges for memory allocation, will increase if needed
typedef struct {
unsigned s;
unsigned t;
} edge;
typedef struct {
unsigned node;
unsigned deg;
} nodedeg ;
typedef struct {
unsigned n;//number of nodes
unsigned e;//number of edges
edge *edges;//list of edges
unsigned *rank;//ranking of the nodes according to degeneracy ordering
//unsigned *map;//oldID newID correspondance NOT USED IN THIS VERSION
} edgelist;
typedef struct {
unsigned n;
unsigned e;
edge *edges;//ading this again here: TO IMPROVE
unsigned *cd;//cumulative degree: (starts with 0) length=n+1
unsigned *adj;//truncated list of neighbors
unsigned core;//core value of the graph
} graph;
typedef struct {
unsigned *n;//n[l]: number of nodes in G_l
unsigned **d;//d[l]: degrees of G_l
unsigned *adj;//truncated list of neighbors
unsigned char *lab;//lab[i] label of node i
unsigned **nodes;//sub[l]: nodes in G_l
unsigned core;
} subgraph;
// Free a CSR graph built by mkgraph. g->edges is intentionally NOT freed
// here: that array is shared with (and still owned by) the edgelist.
void free_graph(graph *g){
free(g->cd);
free(g->adj);
free(g);
}
// Release a per-thread scratch subgraph allocated by allocsub.
// Levels 1..k-1 own their degree and node arrays; level 0 is never
// allocated.
void free_subgraph(subgraph *sg, unsigned char k){
	unsigned char lvl;
	for (lvl=1;lvl<k;lvl++){
		free(sg->d[lvl]);
		free(sg->nodes[lvl]);
	}
	free(sg->d);
	free(sg->nodes);
	free(sg->n);
	free(sg->lab);
	free(sg->adj);
	free(sg);
}
/* Return the maximum of three unsigned integers.
 * Fix: declared `static inline` instead of plain `inline`. A plain
 * `inline` definition in C99/C11 provides no external definition, so a
 * non-inlined call (e.g. at -O0) fails to link. */
static inline unsigned int max3(unsigned int a,unsigned int b,unsigned int c){
	const unsigned int ab = (a > b) ? a : b;
	return (ab > c) ? ab : c;
}
// Read an edge list ("src dst" per line) from `input` into a freshly
// allocated edgelist; el->n becomes (largest node id) + 1. The edge array
// grows in NLINKS-sized steps and is shrunk to fit at the end.
// Fix: fopen failure is now detected; previously a NULL FILE* was passed
// straight to fscanf (undefined behavior).
edgelist* readedgelist(char* input){
	unsigned e1=NLINKS;
	edgelist *el=malloc(sizeof(edgelist));
	FILE *file;
	el->n=0;
	el->e=0;
	file=fopen(input,"r");
	if (file==NULL){
		fprintf(stderr,"Cannot open edgelist file %s\n",input);
		exit(1);
	}
	el->edges=malloc(e1*sizeof(edge));
	while (fscanf(file,"%u %u", &(el->edges[el->e].s), &(el->edges[el->e].t))==2) {//Add one edge
		el->n=max3(el->n,el->edges[el->e].s,el->edges[el->e].t);
		el->e++;
		if (el->e==e1) {
			e1+=NLINKS;
			el->edges=realloc(el->edges,e1*sizeof(edge));
		}
	}
	fclose(file);
	el->n++;// node ids are 0-based: count = max id + 1
	el->edges=realloc(el->edges,el->e*sizeof(edge));
	return el;
}
// Rewrite every edge in terms of the degeneracy ranks, orienting each
// edge from its higher-ranked endpoint to its lower-ranked one, and
// recompute el->n as (highest rank used) + 1.
void relabel(edgelist *el){
	unsigned idx, hi, lo;
	el->n=0;
	for (idx=0;idx<el->e;idx++){
		hi=el->rank[el->edges[idx].s];
		lo=el->rank[el->edges[idx].t];
		if (hi<lo){
			unsigned swp=hi;
			hi=lo;
			lo=swp;
		}
		if (el->n<hi+1){
			el->n=hi+1;
		}
		el->edges[idx].s=hi;
		el->edges[idx].t=lo;
	}
}
///// CORE ordering /////////////////////
typedef struct {
unsigned key;
unsigned value;
} keyvalue;
typedef struct {
unsigned n_max; // max number of nodes.
unsigned n; // number of nodes.
unsigned *pt; // pointers to nodes.
keyvalue *kv; // nodes.
} bheap;
// Allocate an empty binary min-heap able to hold up to n_max keyed nodes.
// pt maps a key to its current slot; the wrapped value -1 marks "absent".
bheap *construct(unsigned n_max){
	unsigned key;
	bheap *heap=malloc(sizeof(bheap));
	heap->n_max=n_max;
	heap->n=0;
	heap->pt=malloc(n_max*sizeof(unsigned));
	heap->kv=malloc(n_max*sizeof(keyvalue));
	for (key=0;key<n_max;key++){
		heap->pt[key]=-1;
	}
	return heap;
}
// Exchange heap slots i and j while keeping the key->slot index (pt)
// consistent: after the swap, pt of each moved key points at its new
// slot. The statement order matters because kv[i] is overwritten before
// pt of the old kv[j] key is fixed up.
void swap(bheap *heap,unsigned i, unsigned j) {
keyvalue kv_tmp=heap->kv[i];
unsigned pt_tmp=heap->pt[kv_tmp.key];
heap->pt[heap->kv[i].key]=heap->pt[heap->kv[j].key];
heap->kv[i]=heap->kv[j];
heap->pt[heap->kv[j].key]=pt_tmp;
heap->kv[j]=kv_tmp;
}
// Restore the min-heap property upwards from slot i (after an insertion
// or a value decrease): swap with the parent while the parent is larger.
void bubble_up(bheap *heap,unsigned i) {
	while (i>0) {
		unsigned parent=(i-1)/2;
		if (heap->kv[parent].value<=heap->kv[i].value) break;
		swap(heap,i,parent);
		i=parent;
	}
}
// Sift the root down until both children are >= it (min-heap order):
// at each step swap with the smaller child, stop when in place.
void bubble_down(bheap *heap) {
	unsigned pos=0;
	for (;;) {
		unsigned left=2*pos+1;
		unsigned right=left+1;
		unsigned child;
		if (left>=heap->n) break;
		child=left;
		if (right<heap->n && heap->kv[right].value<heap->kv[left].value) child=right;
		if (heap->kv[child].value>=heap->kv[pos].value) break;
		swap(heap,pos,child);
		pos=child;
	}
}
// Append kv at the bottom of the heap, record its slot in pt, then sift
// it up into position.
void insert(bheap *heap,keyvalue kv){
	unsigned slot=heap->n++;
	heap->pt[kv.key]=slot;
	heap->kv[slot]=kv;
	bubble_up(heap,slot);
}
// Decrement the stored value (degree) of `key` and restore heap order —
// but only if the key is still in the heap: pt[key] == -1 (UINT_MAX after
// the unsigned wrap) marks keys that were already popped.
void update(bheap *heap,unsigned key){
unsigned i=heap->pt[key];
if (i!=-1){
((heap->kv[i]).value)--;
bubble_up(heap,i);
}
}
// Remove and return the minimum entry. The popped key is marked absent
// (pt = -1), the last entry is moved to the root and sifted down.
keyvalue popmin(bheap *heap){
keyvalue min=heap->kv[0];
heap->pt[min.key]=-1;
heap->kv[0]=heap->kv[--(heap->n)];
heap->pt[heap->kv[0].key]=0;// NOTE(review): when the heap becomes empty this re-points the just-popped key at slot 0; looks harmless since each key is popped once — confirm
bubble_down(heap);
return min;
}
//Building the heap structure with (key,value)=(node,degree) for each node
bheap* mkheap(unsigned n,unsigned *v){
	unsigned key;
	bheap* heap=construct(n);
	for (key=0;key<n;key++){
		keyvalue kv;
		kv.key=key;
		kv.value=v[key];
		insert(heap,kv);
	}
	return heap;
}
// Release a heap created by construct()/mkheap().
void freeheap(bheap *heap){
	free(heap->kv);
	free(heap->pt);
	free(heap);
}
//computing degeneracy ordering and core value
// Builds a scratch undirected adjacency structure, then repeatedly pops
// the minimum-degree node from a min-heap while lazily decrementing its
// neighbours' degrees. The i-th popped node receives rank n-1-i, so the
// nodes removed first end up with the highest ranks.
void ord_core(edgelist* el){
unsigned i,j,r=0,n=el->n,e=el->e;
keyvalue kv;
bheap *heap;
unsigned *d0=calloc(el->n,sizeof(unsigned));// degree of each node
unsigned *cd0=malloc((el->n+1)*sizeof(unsigned));// cumulative degrees (offsets into adj0)
unsigned *adj0=malloc(2*el->e*sizeof(unsigned));// flattened adjacency lists, both directions
for (i=0;i<e;i++) {
d0[el->edges[i].s]++;
d0[el->edges[i].t]++;
}
cd0[0]=0;
for (i=1;i<n+1;i++) {
cd0[i]=cd0[i-1]+d0[i-1];
d0[i-1]=0;// reset: d0 is reused as a fill cursor below
}
for (i=0;i<e;i++) {
adj0[ cd0[el->edges[i].s] + d0[ el->edges[i].s ]++ ]=el->edges[i].t;
adj0[ cd0[el->edges[i].t] + d0[ el->edges[i].t ]++ ]=el->edges[i].s;
}
heap=mkheap(n,d0);
el->rank=malloc(n*sizeof(unsigned));
for (i=0;i<n;i++){
kv=popmin(heap);// node with the smallest remaining degree
el->rank[kv.key]=n-(++r);
for (j=cd0[kv.key];j<cd0[kv.key+1];j++){
update(heap,adj0[j]);// decrement neighbours still in the heap
}
}
freeheap(heap);
free(d0);
free(cd0);
free(adj0);
}
//////////////////////////
//Building the special graph
// Convert the (already rank-relabelled, oriented) edge list into a CSR
// graph: cd holds cumulative out-degrees, adj the out-neighbours. Also
// records the maximum truncated (out-)degree as g->core. Side effects:
// takes shared ownership of el->edges and frees el->rank.
graph* mkgraph(edgelist *el){
unsigned i,max;
unsigned *d;
graph* g=malloc(sizeof(graph));
d=calloc(el->n,sizeof(unsigned));
for (i=0;i<el->e;i++) {
d[el->edges[i].s]++;// out-degree under the rank orientation
}
g->cd=malloc((el->n+1)*sizeof(unsigned));
g->cd[0]=0;
max=0;
for (i=1;i<el->n+1;i++) {
g->cd[i]=g->cd[i-1]+d[i-1];
max=(max>d[i-1])?max:d[i-1];
d[i-1]=0;// reset: d is reused as a fill cursor below
}
printf("core value (max truncated degree) = %u\n",max);
g->adj=malloc(el->e*sizeof(unsigned));
for (i=0;i<el->e;i++) {
g->adj[ g->cd[el->edges[i].s] + d[ el->edges[i].s ]++ ]=el->edges[i].t;
}
free(d);
g->core=max;
g->n=el->n;
free(el->rank);
g->edges=el->edges;// shared with el: free_graph must not free it
g->e=el->e;
//free(el);
// printf("el2=%u\n",el->e);
return g;
}
// Allocate the per-thread scratch subgraph used by the k-clique
// recursion. Every level array is sized by the graph's core value, an
// upper bound on the size of any neighbourhood subgraph.
subgraph* allocsub(graph *g,unsigned char k){
	unsigned lvl;
	subgraph* sg=malloc(sizeof(subgraph));
	sg->core=g->core;
	sg->n=calloc(k,sizeof(unsigned));
	sg->d=malloc(k*sizeof(unsigned*));
	sg->nodes=malloc(k*sizeof(unsigned*));
	for (lvl=1;lvl<k;lvl++){
		sg->d[lvl]=malloc(g->core*sizeof(unsigned));
		sg->nodes[lvl]=malloc(g->core*sizeof(unsigned));
	}
	sg->lab=calloc(g->core,sizeof(unsigned char));
	sg->adj=malloc(g->core*g->core*sizeof(unsigned));
	return sg;
}
unsigned *old=NULL,*new=NULL;//to improve
#pragma omp threadprivate(new,old)
// Build, into the scratch subgraph sg, the subgraph induced by the common
// neighbours of edge (ed.s, ed.t). Nodes are relabelled to 0..j-1 via the
// threadprivate new/old maps, which are allocated lazily once per thread.
void mksub(graph* g,edge ed,subgraph* sg,unsigned char k){
unsigned i,j,l,x,y;
unsigned u=ed.s,v=ed.t;
if (old==NULL){
new=malloc(g->n*sizeof(unsigned));
old=malloc(g->core*sizeof(unsigned));
for (i=0;i<g->n;i++){
new[i]=-1;// sentinel: node not in the current subgraph
}
}
for (i=0;i<sg->n[k-1];i++){
sg->lab[i]=0;
}
for (i=g->cd[v];i<g->cd[v+1];i++){
new[g->adj[i]]=-2;// sentinel: neighbour of v, intersection candidate
}
j=0;
for (i=g->cd[u];i<g->cd[u+1];i++){
x=g->adj[i];
if (new[x]==-2){// x is adjacent to both u and v: keep it
new[x]=j;
old[j]=x;
sg->lab[j]=k-2;
sg->nodes[k-2][j]=j;
sg->d[k-2][j]=0;//new degrees
j++;
}
}
sg->n[k-2]=j;
for (i=0;i<sg->n[k-2];i++){//reodering adjacency list and computing new degrees
x=old[i];
for (l=g->cd[x];l<g->cd[x+1];l++){
y=g->adj[l];
j=new[y];
// Unsigned trick: j<-2 means j < UINT_MAX-1, true exactly when y was
// relabelled with a real index (excludes the -1 and -2 sentinels).
if (j<-2){
sg->adj[sg->core*i+sg->d[k-2][i]++]=j;
}
}
}
for (i=g->cd[v];i<g->cd[v+1];i++){
new[g->adj[i]]=-1;// restore the map for the next call
}
}
unsigned long long *ckdeg_p,*ckdeg;
unsigned *ck_p;
#pragma omp threadprivate(ckdeg_p,ck_p)
// Allocate the k-clique degree accumulators: ckdeg is the shared global
// array, while ckdeg_p and ck_p are threadprivate, so each OpenMP thread
// must allocate its own copies inside a parallel region.
void allocglobal(graph *g,unsigned k){
#pragma omp parallel
{
ck_p=calloc(k,sizeof(unsigned));
ckdeg_p=calloc(g->n,sizeof(unsigned long long));
}
ckdeg=calloc(g->n,sizeof(unsigned long long));
}
// Recursive k-clique enumeration over the scratch subgraph, one level per
// remaining clique vertex. For every clique found it increments *n and
// the per-node clique-degree counters ckdeg_p (threadprivate); ck_p holds
// the partially built clique as original node ids.
void kclique_thread(unsigned char kmax, unsigned char l, subgraph *sg, unsigned long long *n) {
unsigned i,j,k,end,u,v,w;
if (kmax==3){//can be improved
for(i=0; i<sg->n[1]; i++){//list all nodes
ckdeg_p[old[sg->nodes[1][i]]]++;// third vertex completes the triangle
ckdeg_p[ck_p[1]]++;
ckdeg_p[ck_p[2]]++;
(*n)++;//listing here!!!
}
return;
}
if(l==2){
for(i=0; i<sg->n[2]; i++){//list all edges
u=sg->nodes[2][i];
end=u*sg->core+sg->d[2][u];
for (j=u*sg->core;j<end;j++) {
ckdeg_p[old[sg->adj[j]]]++;
ckdeg_p[old[u]]++;
for (l=2;l<kmax;l++){//ok to use l here :)
ckdeg_p[ck_p[l]]++;
}
(*n)++;//listing here!!!
}
}
return;
}
for(i=0; i<sg->n[l]; i++){
u=sg->nodes[l][i];
ck_p[l-1]=old[u];// record u as the (l-1)-th vertex of the clique
//printf("%u %u\n",i,u);
sg->n[l-1]=0;
end=u*sg->core+sg->d[l][u];
for (j=u*sg->core;j<end;j++){//relabeling nodes and forming U'.
v=sg->adj[j];
if (sg->lab[v]==l){
sg->lab[v]=l-1;// demote v into the next level's candidate set
sg->nodes[l-1][sg->n[l-1]++]=v;
sg->d[l-1][v]=0;//new degrees
}
}
for (j=0;j<sg->n[l-1];j++){//reodering adjacency list and computing new degrees
v=sg->nodes[l-1][j];
end=sg->core*v+sg->d[l][v];
for (k=sg->core*v;k<end;k++){
w=sg->adj[k];
if (sg->lab[w]==l-1){
sg->d[l-1][v]++;
}
else{
// Swap w out of the active prefix so level l-1 sees only its own nodes.
sg->adj[k--]=sg->adj[--end];
sg->adj[end]=w;
}
}
}
kclique_thread(kmax,l-1, sg, n);
for (j=0;j<sg->n[l-1];j++){//restoring labels
v=sg->nodes[l-1][j];
sg->lab[v]=l;
}
}
}
// Count all k-cliques of g in parallel over its edges, and merge each
// thread's per-node k-clique degrees (ckdeg_p) into the global ckdeg.
// Fix: bzero() was called without including <strings.h> (implicit
// declaration; bzero was removed from POSIX.1-2008) — replaced with
// memset, whose header <string.h> this file already includes.
unsigned long long kclique_main(unsigned char k, graph *g) {
	unsigned i;
	unsigned long long n=0;
	subgraph *sg;
	#pragma omp parallel private(sg,i) reduction(+:n)
	{
		sg=allocsub(g,k);
		#pragma omp for schedule(dynamic, 1) nowait
		for(i=0; i<g->e; i++){
			ck_p[k-1]=g->edges[i].s;
			ck_p[k-2]=g->edges[i].t;
			mksub(g,g->edges[i],sg,k);
			kclique_thread(k,k-2, sg, &n);
		}
		free_subgraph(sg,k);
		#pragma omp single
		{
			memset(ckdeg,0,g->n*sizeof(unsigned long long));
		}
		#pragma omp barrier // ensure ckdeg is zeroed before any thread merges into it
		#pragma omp critical
		{
			for(i=0; i<g->n; i++){
				ckdeg[i]+=ckdeg_p[i];
			}
			memset(ckdeg_p,0,g->n*sizeof(unsigned long long));
		}
	}
	return n;
}
// Drop every edge with a removed endpoint (rm[v] == 1) from the edge
// list, compacting in place (the current last edge replaces a doomed one
// and the slot is re-examined). Also dumps the surviving edges to
// debug.txt when that file can be created.
// Fix: fopen's result is now checked; previously fprintf/fclose were
// called on a NULL FILE* when debug.txt could not be opened (UB).
void rmnodes(bool *rm,edgelist* el){
	unsigned long long i;
	FILE* file=fopen("debug.txt","w");
	if (file!=NULL){
		for (i=0;i<el->e;i++){
			if (rm[el->edges[i].s]==0 && rm[el->edges[i].t]==0){
				fprintf(file,"%u %u\n",el->edges[i].s,el->edges[i].t);
			}
		}
		fclose(file);
	}
	for (i=0;i<el->e;i++){
		if ((rm[el->edges[i].s]==1) || (rm[el->edges[i].t]==1)){
			el->edges[i--]=el->edges[--(el->e)];
		}
	}
}
/* Densest-subgraph driver: repeatedly count k-cliques, remember the
 * densest graph seen so far, prune every node whose k-clique degree is
 * below rho*k*(1+eps), rebuild the graph and repeat until no k-clique
 * remains. Fixes: argc validation (argv was read unconditionally — UB),
 * best-so-far trackers zero-initialized (they were printed uninitialized
 * when the first graph had no k-clique), n*(n-1) computed in double (the
 * unsigned product overflowed above ~65k nodes), bzero -> memset, and
 * final frees. */
int main(int argc,char** argv){
	edgelist* el;
	graph* g;
	unsigned char k;
	double eps;
	unsigned long long nck;
	time_t t0,t1,t2;
	if (argc<5){
		fprintf(stderr,"Usage: %s p k eps edgelist.txt\n",argv[0]);
		return 1;
	}
	k=atoi(argv[2]);
	eps=atof(argv[3]);
	omp_set_num_threads(atoi(argv[1]));
	t1=time(NULL);
	t0=t1;
	printf("Reading edgelist from file %s\n",argv[4]);
	el=readedgelist(argv[4]);
	printf("Number of nodes = %u\n",el->n);
	printf("Number of edges = %u\n",el->e);
	t2=time(NULL);
	printf("- Time = %ldh%ldm%lds\n",(t2-t1)/3600,((t2-t1)%3600)/60,((t2-t1)%60));
	t1=t2;
	printf("Building the graph structure\n");
	ord_core(el);
	relabel(el);
	g=mkgraph(el);
	printf("Number of nodes (degree > 0) = %u\n",g->n);
	t2=time(NULL);
	printf("- Time = %ldh%ldm%lds\n",(t2-t1)/3600,((t2-t1)%3600)/60,((t2-t1)%60));
	t1=t2;
	unsigned i,n_m=0,e_m=0;
	double rho,rho_m=0,erho,erho_m=0;
	unsigned long long nck_m=0;
	bool *rm=calloc(g->n,sizeof(bool));
	allocglobal(g,k);//allocating global variables
	nck=kclique_main(k, g);
	do{
		printf("\nCurrent graph:\n");
		printf("Number of %u-cliques: %llu\n",k,nck);
		printf("Number of nodes: %u\n",g->n);
		printf("Number of edges: %u\n",g->e);
		erho=2.*((double)g->e)/((double)g->n*((double)g->n-1.));
		printf("edge density: %le\n",erho);
		rho=((double)nck)/((double)(g->n));
		printf("Density: %le\n",rho);
		if (rho>rho_m){
			nck_m=nck;
			rho_m=rho;
			erho_m=erho;
			e_m=g->e;
			n_m=g->n;
		}
		printf("\nDensest found:\n");
		printf("Number of %u-cliques: %llu\n",k,nck_m);
		printf("Number of nodes: %u\n",n_m);
		printf("Number of edges: %u\n",e_m);
		printf("edge density: %le\n",erho_m);
		printf("Density: %le\n",rho_m);
		rho*=k*(1.+eps);// pruning threshold on k-clique degrees
		printf("rho = %e\n",rho);
		for (i=0;i<g->n;i++){
			if (((double)ckdeg[i])<rho){
				rm[i]=1;
			}
		}
		rmnodes(rm,el);
		memset(rm,0,g->n*sizeof(bool));// node count only shrinks, so g->n bytes suffice
		ord_core(el);
		relabel(el);
		free_graph(g);
		g=mkgraph(el);
		nck=kclique_main(k, g);
	}while (nck>0);
	t2=time(NULL);
	printf("- Time = %ldh%ldm%lds\n",(t2-t1)/3600,((t2-t1)%3600)/60,((t2-t1)%60));
	t1=t2;
	printf("- Overall time = %ldh%ldm%lds\n",(t2-t0)/3600,((t2-t0)%3600)/60,((t2-t0)%60));
	printf("\n%u %u %u %llu %e %e\n",k,n_m,e_m,nck_m,erho_m,rho_m);
	free(rm);
	free_graph(g);
	return 0;
}
|
EmbeddingBag.h | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Dhiraj Kalamkar, Evangelos Georganas (Intel Corp.)
******************************************************************************/
#if defined(USE_LIBXSMM_JIT)
#include <libxsmm.h>
#endif
#include "utils.h"
#include "rtm.h"
template <typename T>
class EmbeddingBagImpl
{
public:
// Allocate the M x E weight table (aligned via my_malloc). With
// USE_LIBXSMM_JIT, also JIT-dispatch the three element-wise kernels used
// by forward (indexed column reduction), backward (column replication)
// and update (scalar-broadcast mul-add).
// NOTE(review): the non-4-byte branch dispatches F16 — assumes any
// 2-byte T is an IEEE half; confirm for bfloat16.
EmbeddingBagImpl(int M, int E) : M(M), E(E)
{
weight_ = (T*)my_malloc((size_t)M * E * sizeof(T), alignment);
#ifdef USE_LIBXSMM_JIT
_ld = E;
if (sizeof(T) == 4) {
kernel = libxsmm_dispatch_meltw_reduce_cols_idx(E, &_ld, &_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, (sizeof(long) == 8) ? LIBXSMM_DATATYPE_I64 : LIBXSMM_DATATYPE_I32);
} else {
kernel = libxsmm_dispatch_meltw_reduce_cols_idx(E, &_ld, &_ld, LIBXSMM_DATATYPE_F16, LIBXSMM_DATATYPE_F16, (sizeof(long) == 8) ? LIBXSMM_DATATYPE_I64 : LIBXSMM_DATATYPE_I32);
}
kernel1 = libxsmm_dispatch_meltw_unary(E, 0, &_ld, &_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_REPLICATE_COL_VAR);
kernel2 = libxsmm_dispatch_meltw_binary(E, 1, &_ld, &_ld, &_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_BINARY_BCAST_SCALAR_IN_0, LIBXSMM_MELTW_TYPE_BINARY_MULADD);
#endif
}
~EmbeddingBagImpl()
{
    // Release the weight table; null the pointer so accidental reuse
    // fails fast rather than touching freed memory.
    my_free(weight_);
    weight_ = nullptr;
}
// Fill the weight table with random values in [low, high).
// Fix: compute the element count as (size_t)M * E, matching the
// allocation in the constructor; the previous int product M * E could
// overflow for large embedding tables.
void init(T low = -0.1, T high = 0.1)
{
    init_random((size_t)M * E, weight_, low, high);
}
#ifdef USE_LIBXSMM_JIT
// Sum-pooling forward pass (JIT path): output[n] = sum of the weight rows
// selected by indices[offsets[n] .. bag_end), where the last bag ends at
// NS. One JIT'ed indexed column-reduction kernel call per bag.
// Fix: the kernel-call argument had been mangled by an encoding error
// ("¶ms", i.e. an HTML-entity corruption of "&params"); restored to
// &params so the call compiles.
void forward(int N, int NS, const long *offsets, const long *indices, T *output_)
{
T(*__restrict weight)[E] = (T(*)[*])weight_;
T(*__restrict output)[E] = (T(*)[*])output_;
#pragma omp parallel for
for (int n = 0; n < N; n++)
{
libxsmm_meltw_reduce_cols_idx_param params;
auto start = offsets[n];
auto end = (n < N - 1 ? offsets[n + 1] : NS);
params.n = end - start;// number of rows in this bag
params.ind_ptr = &indices[start];
params.inp_ptr = weight;
params.out_ptr = &output[n][0];
kernel(&params);
}
}
#else
// Sum-pooling forward pass (portable path): zero each output row, then
// accumulate the weight rows listed for that bag. Bag n covers
// indices[offsets[n] .. offsets[n+1]); the last bag ends at NS.
void forward(int N, int NS, const long *offsets, const long *indices, T *output_)
{
T(*__restrict weight)[E] = (T(*)[*])weight_;
T(*__restrict output)[E] = (T(*)[*])output_;
#pragma omp parallel for
for (int n = 0; n < N; n++)
{
auto start = offsets[n];
auto end = (n < N - 1 ? offsets[n + 1] : NS);
#pragma omp simd
for (long v = 0; v < E; v++)
output[n][v] = 0;
for (long s = start; s < end; s++)
{
auto ind = indices[s];
#pragma omp simd
for (long v = 0; v < E; v++)
{
output[n][v] += weight[ind][v];
}
}
}
}
#endif
#ifdef USE_LIBXSMM_JIT
// Backward pass (JIT path): the gradient of a sum-pooled bag w.r.t. every
// member row is the bag's output gradient, so replicate gradout[n] into one
// row of values_ per index in the bag via the JITed replicate-col kernel.
// values_ must hold NS rows of E elements.
void backward(int N, int NS, const T *gradout_, const long *offsets, const long *indices, T *values_)
{
  T(*__restrict gradout)[E] = (T(*)[*])gradout_;
  T(*__restrict values)[E] = (T(*)[*])values_;
  // Fix: removed the unused local `int _ld = E;` -- it shadowed the member
  // _ld captured at construction time and was never read here.
#pragma omp parallel for
  for (int n = 0; n < N; n++)
  {
    libxsmm_meltw_unary_param unary_param;
    auto start = offsets[n];
    auto end = (n < N - 1 ? offsets[n + 1] : NS);
    unsigned long long _N = end-start;
    unary_param.in.primary = (void*)&gradout[n][0];
    unary_param.out.primary = (void*)&values[start][0];
    unary_param.out.secondary = (void*)&_N;
    kernel1(&unary_param);
  }
}
#else
// Reference backward pass: copy the bag's output gradient gradout[n] into
// values[s] for every index position s belonging to bag n.  values_ must
// hold NS rows of E elements.
void backward(int N, int NS, const T *gradout_, const long *offsets, const long *indices, T *values_)
{
  T(*__restrict gradout)[E] = (T(*)[*])gradout_;
  T(*__restrict values)[E] = (T(*)[*])values_;
#pragma omp parallel for
  for (int n = 0; n < N; n++)
  {
    auto start = offsets[n];
    auto end = (n < N - 1 ? offsets[n + 1] : NS);
    for (long s = start; s < end; s++)
    {
      // Optional non-temporal stores: values rows are written once and not
      // re-read here, so bypassing the cache can help bandwidth.
#pragma omp simd
#ifdef STREAMING_WRITES
#pragma vector nontemporal(values)
#endif
      for (long v = 0; v < E; v++)
        values[s][v] = gradout[n][v];
    }
  }
}
#endif
#ifdef USE_LIBXSMM_JIT
// SGD-style scatter update (JIT path): weight[indices[i]] += lr * grads[i],
// computed by the JITed bcast-scalar muladd kernel.  Duplicate indices would
// race, so two strategies are offered:
//   use_rtm == 0: owner-computes "lock free" scheme -- every thread scans all
//     NS gradients but applies only rows with ind % max_thr == its id, so
//     each row is updated by exactly one thread.
//   use_rtm != 0: hardware-transaction (RTM) scheme with a spin-lock
//     fallback guarding each row update.
void update(int NS, const T *grads_, const long *indices, float lr, int M, int use_rtm)
{
  int use_lock_free = use_rtm == 0 ? 1: 0;
  T(*__restrict weight)[E] = (T(*)[*])weight_;
  T(*__restrict grads)[E] = (T(*)[*])grads_;
  // Fix: removed the unused local `int _ld = E;` -- it shadowed the member
  // _ld and was never read in this function.
  if(use_lock_free) {
    /*printf("Using lock free update\n");*/
    int max_thr = omp_get_max_threads();
    // Never use more threads than rows, or some threads would own no rows.
    if(M < max_thr) max_thr = M;
#pragma omp parallel num_threads(max_thr)
    {
      int tid = omp_get_thread_num();
      for(long i = 0; i < NS; i++) {
        auto ind = indices[i];
        if(ind % max_thr == tid) {
          libxsmm_meltw_binary_param binary_param;
          binary_param.in0.primary = (void*)&lr;
          binary_param.in1.primary = (void*)&grads[i][0];
          binary_param.out.primary = (void*)&weight[ind][0];
          {
            kernel2(&binary_param);
          }
        }
      }
    }
  } else {
    SimpleSpinLock fallBackLock;
#pragma omp parallel for
    for (long i = 0; i < NS; i++)
    {
      libxsmm_meltw_binary_param binary_param;
      long ind = indices[i];
      binary_param.in0.primary = (void*)&lr;
      binary_param.in1.primary = (void*)&grads[i][0];
      binary_param.out.primary = (void*)&weight[ind][0];
      {
        // Hardware transaction with up to 100 retries, then the spin lock.
        TransactionScope guard(fallBackLock, 100, 0);
        kernel2(&binary_param);
      }
    }
  }
}
#else
// Reference scatter update: weight[indices[i]] += lr * grads[i].
// Duplicate indices would race, so two strategies are offered:
//   use_rtm == 0: owner-computes "lock free" scheme -- each thread scans all
//     NS gradients but applies only rows with ind % max_thr == its id.
//   use_rtm != 0: hardware-transaction (RTM) scheme with a spin-lock
//     fallback guarding each row update.
void update(int NS, const T *grads_, const long *indices, float lr, int M, int use_rtm)
{
  T(*__restrict weight)[E] = (T(*)[*])weight_;
  T(*__restrict grads)[E] = (T(*)[*])grads_;
  int use_lock_free = use_rtm == 0 ? 1: 0;
  if(use_lock_free) {
    int max_thr = omp_get_max_threads();
    // Never use more threads than rows, or some threads would own no rows.
    if(M < max_thr) max_thr = M;
#pragma omp parallel num_threads(max_thr)
    {
      int tid = omp_get_thread_num();
      for(long i = 0; i < NS; i++) {
        auto ind = indices[i];
        if(ind % max_thr == tid) {
#pragma omp simd
          for (long v = 0; v < E; v++)
            weight[ind][v] += lr * grads[i][v];
        }
      }
    }
  } else {
    SimpleSpinLock fallBackLock;
#pragma omp parallel for
    for (long i = 0; i < NS; i++)
    {
      long ind = indices[i];
      {
        // Hardware transaction with up to 100 retries, then the spin lock.
        TransactionScope guard(fallBackLock, 100, 0);
#pragma omp simd
        for (long v = 0; v < E; v++)
          weight[ind][v] += lr * grads[i][v];
      }
    }
  }
}
#endif
T *weight_;
int M;
int E;
#ifdef USE_LIBXSMM_JIT
int _ld;
libxsmm_meltwfunction_reduce_cols_idx kernel;
libxsmm_meltwfunction_unary kernel1;
libxsmm_meltwfunction_binary kernel2;
#endif
};
|
image-view.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% V V IIIII EEEEE W W %
% V V I E W W %
% V V I EEE W W W %
% V V I E WW WW %
% V IIIII EEEEE W W %
% %
% %
% MagickCore Image View Methods %
% %
% Software Design %
% Cristy %
% March 2003 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/MagickCore.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/thread-private.h"
/*
Typedef declarations.
*/
struct _ImageView
{
  char
    *description;   /* user-visible label, reported in progress callbacks */

  RectangleInfo
    extent;         /* region of the image this view iterates over */

  Image
    *image;         /* image the view was created from (not owned) */

  CacheView
    *view;          /* pixel cache view used for all pixel access */

  ExceptionInfo
    *exception;     /* per-view exception sink (owned by the view) */

  MagickBooleanType
    debug;          /* snapshot of IsEventLogging() at creation */

  size_t
    signature;      /* MagickCoreSignature while the view is valid */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageView() makes a copy of the specified image view.
%
% The format of the CloneImageView method is:
%
% ImageView *CloneImageView(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport ImageView *CloneImageView(const ImageView *image_view)
{
  ImageView
    *clone_view;

  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  clone_view=(ImageView *) AcquireCriticalMemory(sizeof(*clone_view));
  (void) memset(clone_view,0,sizeof(*clone_view));
  clone_view->description=ConstantString(image_view->description);
  clone_view->extent=image_view->extent;
  /*
    Fix: the image pointer was never copied, so the clone's image stayed
    NULL (from the memset above), breaking GetImageViewImage() and every
    iterator (they read view->image) on the clone.
  */
  clone_view->image=image_view->image;
  clone_view->view=CloneCacheView(image_view->view);
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,image_view->exception);
  clone_view->debug=image_view->debug;
  clone_view->signature=MagickCoreSignature;
  return(clone_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageView() deallocates memory associated with a image view.
%
% The format of the DestroyImageView method is:
%
% ImageView *DestroyImageView(ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport ImageView *DestroyImageView(ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  /* Release owned resources: description string, cache view, exception. */
  if (image_view->description != (char *) NULL)
    image_view->description=DestroyString(image_view->description);
  image_view->view=DestroyCacheView(image_view->view);
  image_view->exception=DestroyExceptionInfo(image_view->exception);
  /* Invalidate the signature so stale pointers trip the asserts above. */
  image_view->signature=(~MagickCoreSignature);
  image_view=(ImageView *) RelinquishMagickMemory(image_view);
  return(image_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D u p l e x T r a n s f e r I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DuplexTransferImageViewIterator() iterates over three image views in
% parallel and calls your transfer method for each scanline of the view. The
% source and duplex pixel extent is not confined to the image canvas-- that is
% you can include negative offsets or widths or heights that exceed the image
% dimension. However, the destination image view is confined to the image
% canvas-- that is no negative offsets or widths or heights that exceed the
% image dimension are permitted.
%
% The callback signature is:
%
% MagickBooleanType DuplexTransferImageViewMethod(const ImageView *source,
% const ImageView *duplex,ImageView *destination,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the DuplexTransferImageViewIterator method is:
%
% MagickBooleanType DuplexTransferImageViewIterator(ImageView *source,
% ImageView *duplex,ImageView *destination,
% DuplexTransferImageViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o duplex: the duplex image view.
%
% o destination: the destination image view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
MagickExport MagickBooleanType DuplexTransferImageViewIterator(
  ImageView *source,ImageView *duplex,ImageView *destination,
  DuplexTransferImageViewMethod transfer,void *context)
{
  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  if (transfer == (DuplexTransferImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  destination_image=destination->image;
  /* The destination is written, so promote it out of pseudo-class. */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict duplex_pixels,
      *magick_restrict pixels;

    register Quantum
      *magick_restrict destination_pixels;

    /* A failed row turns the remaining rows into no-ops; an OpenMP loop
       cannot break early. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
      duplex->extent.width,1,duplex->exception);
    if (duplex_pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    if (destination_pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* User callback sees one scanline of all three views at row y. */
    if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w A u t h e n t i c M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewAuthenticMetacontent() returns the image view authentic
% meta-content.
%
% The format of the GetImageViewAuthenticPixels method is:
%
% void *GetImageViewAuthenticMetacontent(
% const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport void *GetImageViewAuthenticMetacontent(
  const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  /* Thin wrapper over the cache view; valid after an authentic-pixel get. */
  return(GetCacheViewAuthenticMetacontent(image_view->view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewAuthenticPixels() returns the image view authentic pixels.
%
% The format of the GetImageViewAuthenticPixels method is:
%
% Quantum *GetImageViewAuthenticPixels(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport Quantum *GetImageViewAuthenticPixels(
  const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  /* Thin wrapper over the cache view's current authentic pixel queue. */
  return(GetCacheViewAuthenticPixelQueue(image_view->view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewException() returns the severity, reason, and description of any
% error that occurs when utilizing a image view.
%
% The format of the GetImageViewException method is:
%
% char *GetImageViewException(const PixelImage *image_view,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o image_view: the pixel image_view.
%
% o severity: the severity of the error is returned here.
%
*/
MagickExport char *GetImageViewException(const ImageView *image_view,
  ExceptionType *severity)
{
  char
    *description;

  assert(image_view != (const ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  assert(severity != (ExceptionType *) NULL);
  *severity=image_view->exception->severity;
  /* Caller owns the returned string; 2*MagickPathExtent leaves room for
     the reason plus the parenthesized description appended below. */
  description=(char *) AcquireQuantumMemory(2UL*MagickPathExtent,
    sizeof(*description));
  if (description == (char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  *description='\0';
  if (image_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(description,GetLocaleExceptionMessage(
      image_view->exception->severity,image_view->exception->reason),
      MagickPathExtent);
  if (image_view->exception->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(description," (",MagickPathExtent);
      (void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
        image_view->exception->severity,image_view->exception->description),
        MagickPathExtent);
      (void) ConcatenateMagickString(description,")",MagickPathExtent);
    }
  return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewExtent() returns the image view extent.
%
% The format of the GetImageViewExtent method is:
%
% RectangleInfo GetImageViewExtent(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport RectangleInfo GetImageViewExtent(const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  /* Returned by value; callers cannot mutate the view's extent. */
  return(image_view->extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewImage() returns the image associated with the image view.
%
% The format of the GetImageViewImage method is:
%
% MagickCore *GetImageViewImage(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport Image *GetImageViewImage(const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  /* The view does not own the image; do not destroy the returned pointer. */
  return(image_view->image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewIterator() iterates over the image view in parallel and calls
% your get method for each scanline of the view. The pixel extent is
% not confined to the image canvas-- that is you can include negative offsets
% or widths or heights that exceed the image dimension. Any updates to
% the pixels in your callback are ignored.
%
% The callback signature is:
%
% MagickBooleanType GetImageViewMethod(const ImageView *source,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback get method that must be
% executed by a single thread at a time.
%
% The format of the GetImageViewIterator method is:
%
% MagickBooleanType GetImageViewIterator(ImageView *source,
% GetImageViewMethod get,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o get: the get callback method.
%
% o context: the user defined context.
%
*/
MagickExport MagickBooleanType GetImageViewIterator(ImageView *source,
  GetImageViewMethod get,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  if (get == (GetImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *pixels;

    /* A failed row turns the remaining rows into no-ops; an OpenMP loop
       cannot break early. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* Read-only callback: no sync, so pixel writes it makes are dropped. */
    if (get(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w V i r t u a l M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewVirtualMetacontent() returns the image view virtual
% meta-content.
%
% The format of the GetImageViewVirtualMetacontent method is:
%
% const void *GetImageViewVirtualMetacontent(
% const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport const void *GetImageViewVirtualMetacontent(
  const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  /* Thin wrapper over the cache view; valid after a virtual-pixel get. */
  return(GetCacheViewVirtualMetacontent(image_view->view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewVirtualPixels() returns the image view virtual pixels.
%
% The format of the GetImageViewVirtualPixels method is:
%
% const Quantum *GetImageViewVirtualPixels(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport const Quantum *GetImageViewVirtualPixels(
  const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  /* Thin wrapper over the cache view's current virtual pixel queue. */
  return(GetCacheViewVirtualPixelQueue(image_view->view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageView() returns MagickTrue if the parameter is verified as a image
% view object.
%
% The format of the IsImageView method is:
%
% MagickBooleanType IsImageView(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport MagickBooleanType IsImageView(const ImageView *image_view)
{
  /*
    A valid image view is non-NULL and still carries the MagickCore
    signature (DestroyImageView() inverts it).
  */
  if ((image_view == (const ImageView *) NULL) ||
      (image_view->signature != MagickCoreSignature))
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewImageView() returns a image view required for all other methods in the
% Image View API.
%
% The format of the NewImageView method is:
%
% ImageView *NewImageView(MagickCore *wand,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageView *NewImageView(Image *image,ExceptionInfo *exception)
{
  ImageView
    *image_view;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /*
    Allocate a zeroed view spanning the full image canvas.
  */
  image_view=(ImageView *) AcquireCriticalMemory(sizeof(*image_view));
  (void) memset(image_view,0,sizeof(*image_view));
  image_view->image=image;
  image_view->view=AcquireVirtualCacheView(image_view->image,exception);
  image_view->extent.x=0;
  image_view->extent.y=0;
  image_view->extent.width=image->columns;
  image_view->extent.height=image->rows;
  image_view->description=ConstantString("ImageView");
  image_view->exception=AcquireeExceptionInfo();
  image_view->debug=IsEventLogging();
  image_view->signature=MagickCoreSignature;
  return(image_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w I m a g e V i e w R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewImageViewRegion() returns a image view required for all other methods
% in the Image View API.
%
% The format of the NewImageViewRegion method is:
%
% ImageView *NewImageViewRegion(MagickCore *wand,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x,y,columns,rows: These values define the perimeter of a extent of
% pixel_wands view.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageView *NewImageViewRegion(Image *image,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height,
  ExceptionInfo *exception)
{
  ImageView
    *image_view;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  image_view=(ImageView *) AcquireCriticalMemory(sizeof(*image_view));
  (void) memset(image_view,0,sizeof(*image_view));
  image_view->description=ConstantString("ImageView");
  /*
    Fix: assign the image before acquiring the cache view.  The original
    called AcquireVirtualCacheView(image_view->image,...) while
    image_view->image was still NULL from the memset above, so the cache
    view was created against no image.  NewImageView() uses this order.
  */
  image_view->image=image;
  image_view->view=AcquireVirtualCacheView(image_view->image,exception);
  image_view->extent.width=width;
  image_view->extent.height=height;
  image_view->extent.x=x;
  image_view->extent.y=y;
  image_view->exception=AcquireExceptionInfo();
  image_view->debug=IsEventLogging();
  image_view->signature=MagickCoreSignature;
  return(image_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i e w D e s c r i p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageViewDescription() associates a description with an image view.
%
% The format of the SetImageViewDescription method is:
%
% void SetImageViewDescription(ImageView *image_view,
% const char *description)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
% o description: the image view description.
%
*/
MagickExport void SetImageViewDescription(ImageView *image_view,
  const char *description)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  /*
    Fix: release any previous description before overwriting it --
    NewImageView() always installs one, so every call here leaked the old
    string.
  */
  if (image_view->description != (char *) NULL)
    image_view->description=DestroyString(image_view->description);
  image_view->description=ConstantString(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageViewIterator() iterates over the image view in parallel and calls
% your set method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension. The pixels are initiallly
% undefined and any settings you make in the callback method are automagically
% synced back to your image.
%
% The callback signature is:
%
% MagickBooleanType SetImageViewMethod(ImageView *destination,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback set method that must be
% executed by a single thread at a time.
%
% The format of the SetImageViewIterator method is:
%
% MagickBooleanType SetImageViewIterator(ImageView *destination,
% SetImageViewMethod set,void *context)
%
% A description of each parameter follows:
%
% o destination: the image view.
%
% o set: the set callback method.
%
% o context: the user defined context.
%
*/
MagickExport MagickBooleanType SetImageViewIterator(ImageView *destination,
  SetImageViewMethod set,void *context)
{
  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(destination != (ImageView *) NULL);
  assert(destination->signature == MagickCoreSignature);
  if (set == (SetImageViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->image;
  /* The destination is written, so promote it out of pseudo-class. */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=destination->extent.height-destination->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(destination_image,destination_image,height,1)
#endif
  for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register Quantum
      *magick_restrict pixels;

    /* A failed row turns the remaining rows into no-ops; an OpenMP loop
       cannot break early. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
      y,destination->extent.width,1,destination->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* Callback fills the (initially undefined) scanline; sync writes it. */
    if (set(destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(destination_image,destination->description,
          progress,destination->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f e r I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransferImageViewIterator() iterates over two image views in parallel and
% calls your transfer method for each scanline of the view. The source pixel
% extent is not confined to the image canvas-- that is you can include
% negative offsets or widths or heights that exceed the image dimension.
% However, the destination image view is confined to the image canvas-- that
% is no negative offsets or widths or heights that exceed the image dimension
% are permitted.
%
% The callback signature is:
%
% MagickBooleanType TransferImageViewMethod(const ImageView *source,
% ImageView *destination,const ssize_t y,const int thread_id,
% void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the TransferImageViewIterator method is:
%
% MagickBooleanType TransferImageViewIterator(ImageView *source,
% ImageView *destination,TransferImageViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o destination: the destination image view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
MagickExport MagickBooleanType TransferImageViewIterator(ImageView *source,
  ImageView *destination,TransferImageViewMethod transfer,void *context)
{
  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  if (transfer == (TransferImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  destination_image=destination->image;
  /* The destination is written, so promote it out of pseudo-class. */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict pixels;

    register Quantum
      *magick_restrict destination_pixels;

    /* A failed row turns the remaining rows into no-ops; an OpenMP loop
       cannot break early. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    if (destination_pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* User callback sees one scanline of both views at row y. */
    if (transfer(source,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U p d a t e I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UpdateImageViewIterator() iterates over the image view in parallel and calls
% your update method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension are permitted. Updates to pixels
% in your callback are automagically synced back to the image.
%
% The callback signature is:
%
% MagickBooleanType UpdateImageViewMethod(ImageView *source,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback update method that must be
% executed by a single thread at a time.
%
% The format of the UpdateImageViewIterator method is:
%
% MagickBooleanType UpdateImageViewIterator(ImageView *source,
% UpdateImageViewMethod update,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o update: the update callback method.
%
% o context: the user defined context.
%
*/
MagickExport MagickBooleanType UpdateImageViewIterator(ImageView *source,
  UpdateImageViewMethod update,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  if (update == (UpdateImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  /* The view is written in place, so promote it out of pseudo-class. */
  status=SetImageStorageClass(source_image,DirectClass,source->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register Quantum
      *magick_restrict pixels;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    if (update(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    /*
      Fix: the original assigned the SyncCacheViewAuthenticPixels() result
      directly to `status`, silently discarding a failure just reported by
      the update callback (and its `if (status == MagickFalse)
      status=MagickFalse;` guard was a no-op).  Use a separate `sync` flag,
      matching the other iterators in this file.
    */
    sync=SyncCacheViewAuthenticPixels(source->view,source->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
|
trace.c | /*
* trace.c - This file contains the functions for firing primary rays
* and handling subsequent calculations
*
* $Id: trace.c,v 1.107 2004/05/28 15:23:34 johns Exp $
*/
#include "machine.h"
#include "types.h"
#include "macros.h"
#include "vector.h"
#include "shade.h"
#include "camera.h"
#include "util.h"
#include "threads.h"
#include "parallel.h"
#include "intersect.h"
#include "ui.h"
#include "trace.h"
/* trace: shade a primary ray, or return the scene background color once
 * the ray's recursion depth budget is exhausted. */
color trace(ray * primary) {
  /* depth-exhausted (truncated) rays contribute the background color */
  if (primary->depth <= 0)
    return primary->scene->background;

  /* find the nearest object intersections, then run the scene's shader */
  intersect_objects(primary);
  return primary->scene->shader(primary);
}
/*
 * thread_trace: rendering work loop for one worker thread (or, when built
 * with _OPENMP, for a whole OpenMP team inside the parallel region below).
 * Fires one primary ray per pixel over this thread's share of scanlines,
 * quantizes the resulting color to 8-bit RGB, and stores it into the
 * shared image buffer.  Returns NULL (pthread-style entry point).
 */
void * thread_trace(thr_parms * t) {
unsigned long * local_mbox = NULL; /* per-thread intersection mailbox array */
scenedef * scene;
int addr, R,G,B;
unsigned char * img;
color col;
ray primary;
int x, y, do_ui, hskip;
int startx, stopx, xinc, starty, stopy, yinc, hsize, vres;
#if defined(_OPENMP)
/*
 * NOTE(review): all locals above are declared before this parallel region,
 * so under OpenMP default data-sharing rules they are shared by the team;
 * x, y, addr, col and primary are written inside the region and look like
 * they would race between threads -- confirm whether the OpenMP build
 * privatizes them some other way.
 */
#pragma omp parallel
{
#endif
/*
 * Copy all of the frequently used parameters into local variables.
 * This seems to improve performance, especially on NUMA systems.
 */
startx = t->startx;
stopx = t->stopx;
xinc = t->xinc;
starty = t->starty;
stopy = t->stopy;
yinc = t->yinc;
scene = t->scene;
img = scene->img;
hsize = scene->hres*3; /* bytes per scanline: 3 bytes (RGB) per pixel */
vres = scene->vres;
hskip = xinc * 3; /* byte stride between this thread's consecutive pixels */
do_ui = (scene->mynode == 0 && t->tid == 0); /* only one thread drives the UI */
#if !defined(DISABLEMBOX)
/* allocate mailbox array per thread... */
#if defined(_OPENMP)
/* NOTE(review): this branch sizes the mailbox by scene->numobjects while the
 * non-OpenMP branch and the memset below use scene->objgroup.numobjects --
 * verify these two counts are the same. */
local_mbox = (unsigned long *)calloc(sizeof(unsigned long)*scene->numobjects, 1);
#else
if (t->local_mbox == NULL)
local_mbox = (unsigned long *)calloc(sizeof(unsigned long)*scene->objgroup.numobjects, 1);
else
local_mbox = t->local_mbox;
#endif
#else
local_mbox = NULL; /* mailboxes are disabled */
#endif
#if defined(_OPENMP)
/* NOTE(review): "omp single" binds to the next statement; when LP64 is
 * defined, the `#if !defined(LP64)` block below compiles away and the
 * single region would instead cover camray_init() -- confirm intent. */
#pragma omp single
#endif
/*
 * If we are getting close to integer wraparound on the
 * ray serial numbers, we need to re-clear the mailbox
 * array(s). Each thread maintains its own serial numbers
 * so only those threads that are getting hit hard will
 * need to re-clear their mailbox arrays. In all likelihood,
 * the threads will tend to hit their counter limits at about
 * the same time though.
 * When compiled on platforms with a 64-bit long, this counter won't
 * wraparound in _anyone's_ lifetime, so no need to even check....
 * On lesser-bit platforms, we're not quite so lucky, so we have to check.
 */
#if !defined(LP64)
if (local_mbox != NULL) {
if (t->serialno > (((unsigned long) 1) << ((sizeof(unsigned long) * 8) - 3))) {
memset(local_mbox, 0, sizeof(unsigned long) * scene->objgroup.numobjects);
t->serialno = 1;
}
}
#endif
/* setup the thread-specific properties of the primary ray(s) */
camray_init(scene, &primary, t->serialno, local_mbox);
#if defined(_OPENMP)
#pragma omp for schedule(runtime)
#endif
for (y=starty; y<=stopy; y+=yinc) {
addr = hsize * (y - 1) + (3 * (startx - 1)); /* scanline address */
for (x=startx; x<=stopx; x+=xinc) {
col=scene->camera.cam_ray(&primary, x, y); /* generate ray */
R = (int) (col.r * 255.0f); /* quantize float to integer */
G = (int) (col.g * 255.0f); /* quantize float to integer */
B = (int) (col.b * 255.0f); /* quantize float to integer */
/* NOTE(review): only the upper bound is clamped here; a negative color
 * component (if one can occur) would wrap on the (byte) cast -- confirm
 * the shader guarantees non-negative components. */
if (R > 255) R = 255; /* clamp pixel value to range 0-255 */
img[addr ] = (byte) R; /* Store final pixel to the image buffer */
if (G > 255) G = 255; /* clamp pixel value to range 0-255 */
img[addr + 1] = (byte) G; /* Store final pixel to the image buffer */
if (B > 255) B = 255; /* clamp pixel value to range 0-255 */
img[addr + 2] = (byte) B; /* Store final pixel to the image buffer */
addr += hskip;
} /* end of x-loop */
if (do_ui && !((y-1) % 64)) { /* report progress every 64th scanline */
rt_ui_progress((100 * y) / vres); /* call progress meter callback */
}
#ifdef MPI
if (scene->nodes > 1) {
rt_thread_barrier(t->runbar, 1);
rt_sendrecvscanline(scene->parbuf);
}
#endif
} /* end y-loop */
/* remember the next unused ray serial number for subsequent frames */
t->serialno = primary.serial + 1;
#if defined(_OPENMP)
if (local_mbox != NULL)
free(local_mbox);
#else
/* only free the mailbox when this thread allocated it itself above */
if (t->local_mbox == NULL) {
if (local_mbox != NULL)
free(local_mbox);
}
#endif
if (scene->nodes == 1)
rt_thread_barrier(t->runbar, 1);
#if defined(_OPENMP)
}
#endif
return(NULL);
}
|
omptough.c | #include <pthread.h>
#include <stdlib.h>
#include <malloc.h>
#include <unistd.h>
#include <stdio.h>
#include <omp.h>
#include "papi_test.h"
#define NITER (100000)
int main( int argc, char* argv[] )
{
int i;
int ret;
int nthreads;
int *evtset;
int *ctrcode;
nthreads = omp_get_max_threads();
evtset = (int*) malloc( sizeof(int)*nthreads );
ctrcode = (int*) malloc( sizeof(int)*nthreads );
tests_quiet(argc, argv); /* Set TESTS_QUIET variable */
ret=PAPI_library_init( PAPI_VER_CURRENT );
if( ret!=PAPI_VER_CURRENT && ret>0 )
{
fprintf(stderr, "PAPI library version mismatch '%s'\n",
PAPI_strerror(ret) );
exit(1);
}
if( ret<0 )
{
fprintf(stderr, "PAPI initialization error '%s'\n",
PAPI_strerror(ret) );
exit(1);
}
if( (ret = PAPI_thread_init((unsigned long (*)(void)) pthread_self)) != PAPI_OK )
{
fprintf(stderr, "PAPI thread initialization error '%s'\n",
PAPI_strerror(ret) );
exit(1);
}
for( i=0; i<nthreads; i++ )
{
evtset[i]=PAPI_NULL;
if( (ret=PAPI_event_name_to_code( "PAPI_TOT_INS", &ctrcode[i]))
!=PAPI_OK )
{
fprintf(stderr, "PAPI evt-name-to-code error '%s'\n",
PAPI_strerror(ret) );
}
}
for( i=0; i<NITER; i++ ){
#pragma omp parallel
{
int tid;
int pid;
tid = omp_get_thread_num();
pid = pthread_self();
if( (ret=PAPI_register_thread()) != PAPI_OK ){
if (!TESTS_QUIET) {
fprintf(stderr, "[%5d] Error in register thread (tid=%d pid=%d) '%s'\n",
i, tid, pid, PAPI_strerror(ret) );
test_fail(__FILE__, __LINE__, "omptough", 1);
}
}
evtset[tid]=PAPI_NULL;
if( (ret=PAPI_create_eventset(&(evtset[tid]))) != PAPI_OK ){
if (!TESTS_QUIET) {
fprintf(stderr, "[%5d] Error creating eventset (tid=%d pid=%d) '%s'\n",
i, tid, pid, PAPI_strerror(ret) );
test_fail(__FILE__, __LINE__, "omptough", 1);
}
}
if( (ret=PAPI_destroy_eventset(&(evtset[tid]))) != PAPI_OK ){
if (!TESTS_QUIET) {
fprintf(stderr, "[%5d] Error destroying eventset (tid=%d pid=%d) '%s'\n",
i, tid, pid, PAPI_strerror(ret) );
evtset[tid]=PAPI_NULL;
test_fail(__FILE__, __LINE__, "omptough", 1);
}
}
if( (ret=PAPI_unregister_thread()) != PAPI_OK ){
if (!TESTS_QUIET) {
fprintf(stderr, "[%5d] Error in unregister thread (tid=%d pid=%d) ret='%s'\n",
i, tid, pid, PAPI_strerror(ret) );
test_fail(__FILE__, __LINE__, "omptough", 1);
}
}
}
}
test_pass(__FILE__, NULL, 0);
exit(1);
}
|
GB_AxB_colscale_template.c | //------------------------------------------------------------------------------
// GB_AxB_colscale_template: C=A*D where D is a square diagonal matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// This template is not used If C is iso, since all that is needed is to create
// C as a shallow-copy of the pattern of A.
// A and C can be jumbled. D cannot, but it is a diagonal matrix so it is
// never jumbled.
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
// Dx, j, and Ah are unused if the operator is FIRST or PAIR
#include "GB_unused.h"
ASSERT (GB_JUMBLED_OK (C)) ;
ASSERT (GB_JUMBLED_OK (A)) ;
ASSERT (!GB_JUMBLED (D)) ;
ASSERT (!C->iso) ;
//--------------------------------------------------------------------------
// get C, A, and D
//--------------------------------------------------------------------------
// when A (or D) is "pattern", its values are not read, so Ax (Dx) is NULL
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ah = A->h ;
const GB_ATYPE *restrict Ax = (GB_ATYPE *) (A_is_pattern ? NULL : A->x) ;
const GB_BTYPE *restrict Dx = (GB_BTYPE *) (D_is_pattern ? NULL : D->x) ;
const int64_t avlen = A->vlen ;
const bool A_iso = A->iso ;
const bool D_iso = D->iso ;
// A_ek_slicing packs three arrays of length A_ntasks: the first vector k,
// the last vector k, and the starting entry p, for each task's slice of A
const int64_t *restrict kfirst_Aslice = A_ek_slicing ;
const int64_t *restrict klast_Aslice = A_ek_slicing + A_ntasks ;
const int64_t *restrict pstart_Aslice = A_ek_slicing + A_ntasks * 2 ;
//--------------------------------------------------------------------------
// C=A*D
//--------------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < A_ntasks ; tid++)
{
// if kfirst > klast then task tid does no work at all
int64_t kfirst = kfirst_Aslice [tid] ;
int64_t klast = klast_Aslice [tid] ;
//----------------------------------------------------------------------
// C(:,kfirst:klast) = A(:,kfirst:klast)*D(kfirst:klast,kfirst:klast)
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// find the part of A(:,k) and C(:,k) to be operated on by this task
//------------------------------------------------------------------
// j is the vector index: k itself if A is sparse, Ah [k] if hypersparse
int64_t j = GBH (Ah, k) ;
int64_t pA_start, pA_end ;
GB_get_pA (&pA_start, &pA_end, tid, k,
kfirst, klast, pstart_Aslice, Ap, avlen) ;
//------------------------------------------------------------------
// C(:,j) = A(:,j)*D(j,j)
//------------------------------------------------------------------
GB_GETB (djj, Dx, j, D_iso) ; // djj = D (j,j)
// scale the entries of A(:,j): each C entry is aij * djj
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = pA_start ; p < pA_end ; p++)
{
GB_GETA (aij, Ax, p, A_iso) ; // aij = A(i,j)
GB_BINOP (GB_CX (p), aij, djj, 0, 0) ; // C(i,j) = aij * djj
}
}
}
}
|
ast-dump-openmp-taskloop-simd.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) { // simplest case: one canonical loop under taskloop simd
#pragma omp taskloop simd
for (int i = 0; i < x; i++) // 'x' is captured implicitly firstprivate (see CHECK lines)
; // empty body; dumped as the NullStmt marked openmp_structured_block
}
void test_two(int x, int y) { // nested loops without collapse: only the outer loop is associated
#pragma omp taskloop simd
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++) // inner ForStmt is the openmp_structured_block (see CHECK lines)
;
}
void test_three(int x, int y) { // collapse(1) is dumped like the no-clause case
#pragma omp taskloop simd collapse(1)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++) // inner ForStmt is still the openmp_structured_block
;
}
void test_four(int x, int y) { // collapse(2): both loops are associated with the directive
#pragma omp taskloop simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
; // with both loops collapsed, the NullStmt is the openmp_structured_block
}
void test_five(int x, int y, int z) { // collapse(2) with three loops: innermost is not associated
#pragma omp taskloop simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
for (int i = 0; i < z; i++) // third ForStmt is the openmp_structured_block (see CHECK lines)
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-taskloop-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPTaskLoopSimdDirective {{.*}} <line:4:9, col:26>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .st. 'const long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .liter. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .reductions. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPTaskLoopSimdDirective {{.*}} <line:10:9, col:26>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .st. 'const long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .liter. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .reductions. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPTaskLoopSimdDirective {{.*}} <line:17:9, col:38>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:27, col:37>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:36> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:36> 'int' 1
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .st. 'const long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .liter. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .reductions. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPTaskLoopSimdDirective {{.*}} <line:24:9, col:38>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:27, col:37>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:36> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:36> 'int' 2
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .st. 'const long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .liter. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .reductions. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPTaskLoopSimdDirective {{.*}} <line:31:9, col:38>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:27, col:37>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:36> 'int'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:36> 'int' 2
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .lb. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .ub. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .st. 'const long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .liter. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .reductions. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:31:9) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
LAGraph_pagerank3c.c | //------------------------------------------------------------------------------
// LAGraph_pagerank3c: pagerank using a real semiring
//------------------------------------------------------------------------------
/*
LAGraph: graph algorithms based on GraphBLAS
Copyright 2019 LAGraph Contributors.
(see Contributors.txt for a full list of Contributors; see
ContributionInstructions.txt for information on how you can Contribute to
this project).
All Rights Reserved.
NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
Released under a BSD license, please see the LICENSE file distributed with
this Software or contact permission@sei.cmu.edu for full terms.
Created, in part, with funding and support from the United States
Government. (see Acknowledgments.txt file).
This program includes and/or can make use of certain third party source
code, object code, documentation and other files ("Third Party Software").
See LICENSE file for more details.
*/
//------------------------------------------------------------------------------
// LAGraph_pagerank3c: GAP-style PageRank, with import/export
// See also LAGraph_pagerank3a, for the same computation without import/export.
// This algorithm follows the specification given in the GAP Benchmark Suite:
// https://arxiv.org/abs/1508.03619 which assumes that both A and A' are
// already available, as are the row and column degrees.
// The GAP Benchmark algorithm assumes the graph has no nodes with no out-going
// edges (otherwise, a divide-by-zero occurs). In terms of the adjacency
// matrix, it assumes there are no rows in A that have no entries.
// For fastest results, the input matrix should be stored in GxB_BY_COL format.
// TODO: or use AT by row, since the GAP assumes both A and A' are available.
#include "LAGraph.h"
#define LAGRAPH_FREE_WORK \
{ \
LAGRAPH_FREE (I) ; \
LAGRAPH_FREE (pr) ; \
LAGRAPH_FREE (prior) ; \
GrB_free (&v) ; \
}
#define LAGRAPH_FREE_ALL \
{ \
LAGRAPH_FREE_WORK ; \
GrB_free (result) ; \
}
// LAGraph_pagerank3c: GAP-style PageRank using GxB import/export so that the
// rank vector's underlying arrays (pr, I) can be updated directly with OpenMP
// between GraphBLAS calls.  Iterates until itermax iterations or until the
// 1-norm of the rank change drops to tol.  On success, *result holds the
// final ranks (GrB_FP32, size n) and *iters the number of iterations taken.
// Assumes every node has out-degree > 0 (d_out[i] != 0); a zero entry would
// cause a division by zero below.
GrB_Info LAGraph_pagerank3c // PageRank definition
(
GrB_Vector *result, // output: array of LAGraph_PageRank structs
GrB_Matrix A, // binary input graph, not modified
const float *LA_RESTRICT d_out, // out degree of each node (GrB_FP32, size n)
float damping, // damping factor (typically 0.85)
int itermax, // maximum number of iterations
int *iters // output: number of iterations taken
)
{
//--------------------------------------------------------------------------
// initializations
//--------------------------------------------------------------------------
GrB_Info info ;
GrB_Index n, ncols ;
GrB_Vector v = NULL ;
GrB_Index *I = NULL ; // index array [0..n-1] paired with pr for import/export
float *LA_RESTRICT pr = NULL ; // current rank of each node
float *prior = NULL ; // rank from the previous iteration, for convergence test
(*result) = NULL ;
LAGr_Matrix_ncols (&ncols, A) ;
LAGr_Matrix_nrows (&n, A) ;
if (ncols != n)
{
LAGRAPH_ERROR ("matrix must be square", GrB_DIMENSION_MISMATCH) ;
}
// Teleport value
const float teleport = (1 - damping) / n ;
const float tol = 1e-4 ;
float rdiff = 1 ; // first iteration is always done
GrB_Type type = GrB_FP32 ;
int nthreads = LAGraph_get_nthreads ( ) ;
nthreads = LAGRAPH_MIN (n, nthreads) ;
nthreads = LAGRAPH_MAX (nthreads, 1) ;
// initializing pr and I
pr = LAGraph_malloc (n, sizeof (float)) ;
I = LAGraph_malloc (n, sizeof (GrB_Index)) ;
prior = LAGraph_malloc (n, sizeof (float)) ;
if (pr == NULL || I == NULL || prior == NULL)
{
LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ;
}
// uniform starting rank 1/n; I is the dense index pattern 0..n-1
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t k = 0 ; k < n ; k++)
{
I [k] = k ;
pr [k] = 1.0/n ;
}
//--------------------------------------------------------------------------
// pagerank iterations
//--------------------------------------------------------------------------
for ((*iters) = 0 ; (*iters) < itermax && rdiff > tol ; (*iters)++)
{
// printf ("\n============================ pagerank 3C iter: %d\n", (*iters)) ;
// Importance calculation
// scale each rank by damping/out-degree before the matrix-vector product
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t i = 0 ; i < n; i++)
{
prior [i] = pr [i] ;
pr [i] = damping * pr [i] / d_out [i] ;
}
// import pr and I into v
// NOTE(review): import appears to hand ownership of I and pr to v (the
// export below returns them) -- confirm against the GxB import semantics
LAGr_Vector_import (&v, GrB_FP32, n, n, &I, (void **) (&pr), NULL) ;
// Calculate total PR of all inbound vertices: v = A' * v
LAGr_mxv (v, NULL, NULL, GxB_PLUS_SECOND_FP32, A, v, LAGraph_desc_tooo);
GrB_Index nvals ;
LAGr_Vector_nvals (&nvals, v) ;
// a sparse result means some node received no rank: an empty row/column
if (nvals != n)
{
LAGRAPH_ERROR ("Matrix must not have empty rows or columns!",
GrB_PANIC) ;
}
// export v to pr and I
LAGr_Vector_export (&v, &type, &n, &nvals, &I, (void **) (&pr), NULL) ;
// add teleport and check for convergence
rdiff = 0 ;
#pragma omp parallel for num_threads(nthreads) schedule(static) \
reduction(+:rdiff)
for (int64_t i = 0 ; i < n; i++)
{
pr [i] += teleport ;
rdiff += fabsf (prior [i] - pr [i]) ;
}
}
// import result (pr and I) into final result
LAGr_Vector_import (result, GrB_FP32, n, n, &I, (void **) (&pr), NULL) ;
// LAGRAPH_FREE_WORK frees prior, and (harmlessly) the I/pr/v handles that
// were consumed by the import above
LAGRAPH_FREE_WORK ;
return (GrB_SUCCESS) ;
}
|
kernel.c | #include "SimpleMOC-kernel_header.h"
// Runs the attenuation benchmark kernel.  Each OpenMP thread allocates its
// own scratch vectors and starting flux, then the segment loop is divided
// dynamically among threads; every iteration attenuates one segment through
// a randomly chosen source region and fine axial interval.
//
// I:     problem configuration (segment count, energy groups, ...)
// S:     array of source regions (shared, protected by per-interval locks)
// table: exponential lookup table (read-only)
void run_kernel( Input * I, Source * S, Table * table)
{
	// Enter Parallel Region
	#pragma omp parallel default(none) shared(I, S, table)
	{
		#ifdef OPENMP
		int thread = omp_get_thread_num();
		#else
		int thread = 0;
		#endif

		// Create Thread Local Random Seed
		unsigned int seed = time(NULL) * (thread+1);

		// Allocate Thread Local SIMD Vectors (align if using intel compiler)
		#ifdef INTEL
		SIMD_Vectors simd_vecs = aligned_allocate_simd_vectors(I);
		float * state_flux = (float *) _mm_malloc(
				I->egroups * sizeof(float), 64);
		#else
		SIMD_Vectors simd_vecs = allocate_simd_vectors(I);
		float * state_flux = (float *) malloc(
				I->egroups * sizeof(float));
		#endif

		// Initialize Thread Local Flux Vector with random starting values
		for( int i = 0; i < I->egroups; i++ )
			state_flux[i] = (float) rand_r(&seed) / RAND_MAX;

		// Initialize PAPI Counters (if enabled)
		#ifdef PAPI
		int eventset = PAPI_NULL;
		int num_papi_events;
		#pragma omp critical
		{
			counter_init(&eventset, &num_papi_events, I);
		}
		#endif

		// Enter OMP For Loop over Segments
		#pragma omp for schedule(dynamic,100)
		for( long i = 0; i < I->segments; i++ )
		{
			// Pick Random QSR
			int QSR_id = rand_r(&seed) % I->source_3D_regions;

			// Pick Random Fine Axial Interval
			int FAI_id = rand_r(&seed) % I->fine_axial_intervals;

			// Attenuate Segment
			attenuate_segment( I, S, QSR_id, FAI_id, state_flux,
					&simd_vecs, table);
		}

		// Stop PAPI Counters
		#ifdef PAPI
		if( thread == 0 )
		{
			printf("\n");
			border_print();
			center_print("PAPI COUNTER RESULTS", 79);
			border_print();
			// fixed typo: "Smybol" -> "Symbol"
			printf("Count \tSymbol \tDescription\n");
		}
		{
		#pragma omp barrier
		}
		counter_stop(&eventset, num_papi_events, I);
		#endif

		// Release the thread-local flux buffer (was previously leaked)
		#ifdef INTEL
		_mm_free(state_flux);
		#else
		free(state_flux);
		#endif
		// NOTE(review): simd_vecs' internal arrays are allocated by
		// (aligned_)allocate_simd_vectors() and also leak here; the header
		// presumably owns the layout (q0 may be the base of one contiguous
		// allocation) -- confirm a matching free routine before adding one.
	}
}
// Attenuates one track segment through fine axial interval FAI_id of source
// region QSR_id.  Fits the fine source axially (linear at the domain edges,
// quadratic in the interior), computes the flux integral along the segment,
// accumulates the tally into the shared fine flux (lock-protected under
// OpenMP), and updates state_flux in place with the outgoing angular flux.
// All scratch arrays live in simd_vecs and have length I->egroups.
void attenuate_segment( Input * restrict I, Source * restrict S,
int QSR_id, int FAI_id, float * restrict state_flux,
SIMD_Vectors * restrict simd_vecs, Table * restrict table)
{
// Unload local vector vectors
float * restrict q0 = simd_vecs->q0;
float * restrict q1 = simd_vecs->q1;
float * restrict q2 = simd_vecs->q2;
float * restrict sigT = simd_vecs->sigT;
float * restrict tau = simd_vecs->tau;
float * restrict sigT2 = simd_vecs->sigT2;
float * restrict expVal = simd_vecs->expVal;
float * restrict reuse = simd_vecs->reuse;
float * restrict flux_integral = simd_vecs->flux_integral;
float * restrict tally = simd_vecs->tally;
float * restrict t1 = simd_vecs->t1;
float * restrict t2 = simd_vecs->t2;
float * restrict t3 = simd_vecs->t3;
float * restrict t4 = simd_vecs->t4;
// Some placeholder constants - In the full app some of these are
// calculated based off position in geometry. This treatment
// shaves off a few FLOPS, but is not significant compared to the
// rest of the function.
const float dz = 0.1f;
const float zin = 0.3f;
const float weight = 0.5f;
const float mu = 0.9f;
const float mu2 = 0.3f;
const float ds = 0.7f;
const int egroups = I->egroups;
// load fine source region flux vector
float * FSR_flux = &S[QSR_id].fine_flux[FAI_id * egroups];
// first interval: only a forward neighbor exists, so fit linearly
if( FAI_id == 0 )
{
float * f2 = &S[QSR_id].fine_source[FAI_id*egroups];
float * f3 = &S[QSR_id].fine_source[(FAI_id+1)*egroups];
// cycle over energy groups
#ifdef INTEL
#pragma vector
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
// load neighboring sources
const float y2 = f2[g];
const float y3 = f3[g];
// do linear "fitting"
const float c0 = y2;
const float c1 = (y3 - y2) / dz;
// calculate q0, q1, q2
q0[g] = c0 + c1*zin;
q1[g] = c1;
q2[g] = 0;
}
}
// last interval: only a backward neighbor exists, so fit linearly
else if ( FAI_id == I->fine_axial_intervals - 1 )
{
float * f1 = &S[QSR_id].fine_source[(FAI_id-1)*egroups];
float * f2 = &S[QSR_id].fine_source[FAI_id*egroups];
// cycle over energy groups
#ifdef INTEL
#pragma vector
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
// load neighboring sources
const float y1 = f1[g];
const float y2 = f2[g];
// do linear "fitting"
const float c0 = y2;
const float c1 = (y2 - y1) / dz;
// calculate q0, q1, q2
q0[g] = c0 + c1*zin;
q1[g] = c1;
q2[g] = 0;
}
}
// interior interval: both neighbors exist, so fit a quadratic
else
{
float * f1 = &S[QSR_id].fine_source[(FAI_id-1)*egroups];
float * f2 = &S[QSR_id].fine_source[FAI_id*egroups];
float * f3 = &S[QSR_id].fine_source[(FAI_id+1)*egroups];
// cycle over energy groups
#ifdef INTEL
#pragma vector
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
// load neighboring sources
const float y1 = f1[g];
const float y2 = f2[g];
const float y3 = f3[g];
// do quadratic "fitting"
const float c0 = y2;
const float c1 = (y1 - y3) / (2.f*dz);
const float c2 = (y1 - 2.f*y2 + y3) / (2.f*dz*dz);
// calculate q0, q1, q2
q0[g] = c0 + c1*zin + c2*zin*zin;
q1[g] = c1 + 2.f*c2*zin;
q2[g] = c2;
}
}
// cycle over energy groups
#ifdef INTEL
#pragma vector
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
// load total cross section
sigT[g] = S[QSR_id].sigT[g];
// calculate common values for efficiency
tau[g] = sigT[g] * ds;
sigT2[g] = sigT[g] * sigT[g];
}
// cycle over energy groups
#ifdef INTEL
#pragma vector aligned
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
#ifdef TABLE
expVal[g] = interpolateTable( table, tau[g] );
#else
expVal[g] = 1.f - expf( -tau[g] ); // exp is faster on many architectures
#endif
}
// Flux Integral
// Re-used Term
#ifdef INTEL
#pragma vector aligned
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
// NOTE(review): precedence makes this tau*(tau-2) + (2*expVal)/sigT^3.
// If the whole numerator tau*(tau-2)+2*expVal was meant to be divided
// by sigT^3, parentheses are missing -- confirm against the full
// SimpleMOC app before changing (this form matches upstream).
reuse[g] = tau[g] * (tau[g] - 2.f) + 2.f * expVal[g]
/ (sigT[g] * sigT2[g]);
}
//#pragma vector alignednontemporal
#ifdef INTEL
#pragma vector aligned
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
// add contribution to new source flux
flux_integral[g] = (q0[g] * tau[g] + (sigT[g] * state_flux[g] - q0[g])
* expVal[g]) / sigT2[g] + q1[g] * mu * reuse[g] + q2[g] * mu2
* (tau[g] * (tau[g] * (tau[g] - 3.f) + 6.f) - 6.f * expVal[g])
/ (3.f * sigT2[g] * sigT2[g]);
}
#ifdef INTEL
#pragma vector aligned
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
// Prepare tally
tally[g] = weight * flux_integral[g];
}
// the fine flux is shared between threads: guard the accumulation with
// the per-interval lock when built with OpenMP
#ifdef OPENMP
omp_set_lock(S[QSR_id].locks + FAI_id);
#endif
#ifdef INTEL
#pragma vector
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
FSR_flux[g] += tally[g];
}
#ifdef OPENMP
omp_unset_lock(S[QSR_id].locks + FAI_id);
#endif
// Term 1
#ifdef INTEL
#pragma vector aligned
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
t1[g] = q0[g] * expVal[g] / sigT[g];
}
// Term 2
#ifdef INTEL
#pragma vector aligned
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
t2[g] = q1[g] * mu * (tau[g] - expVal[g]) / sigT2[g];
}
// Term 3
#ifdef INTEL
#pragma vector aligned
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
t3[g] = q2[g] * mu2 * reuse[g];
}
// Term 4
#ifdef INTEL
#pragma vector aligned
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
t4[g] = state_flux[g] * (1.f - expVal[g]);
}
// Total psi
#ifdef INTEL
#pragma vector aligned
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
// outgoing angular flux overwrites the incoming state_flux
state_flux[g] = t1[g] + t2[g] + t3[g] + t4[g];
}
}
/* Interpolates a formed exponential table to compute ( 1- exp(-x) )
* at the desired x value */
/* Evaluate ( 1 - exp(-x) ) by piecewise-linear interpolation of a
 * precomputed table.  Each table entry is a (slope, intercept) pair stored
 * consecutively in table->values. */
float interpolateTable( Table * restrict table, float x)
{
	/* Beyond the table's domain the exponential term has decayed away,
	 * so the result saturates at 1. */
	if( x > table->maxVal )
		return 1.0f;

	/* Map x to its (slope, intercept) pair; entries are interleaved, so
	 * the pair for bucket b starts at values[2*b].
	 * NOTE(review): the "+ 0.5f * table->dx" rounding term looks like it
	 * was meant to be "+ 0.5f" (half a bucket) -- matches upstream, so it
	 * is preserved here; confirm before changing. */
	const int pair = 2 * (int) ( x / table->dx + 0.5f * table->dx );
	const float slope = table->values[ pair ];
	const float intercept = table->values[ pair + 1 ];

	return slope * x + intercept;
}
|
target_exit_data_map_messages.c | // RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify -fopenmp -ferror-limit 100 -o - %s -Wuninitialized
// RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify -fopenmp -ferror-limit 100 -o - -x c++ %s -Wuninitialized
// RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify -fopenmp-simd -ferror-limit 100 -o - %s -Wuninitialized
// RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify -fopenmp-simd -ferror-limit 100 -o - -x c++ %s -Wuninitialized
// Exercises clang's diagnostics for '#pragma omp target exit data' map
// clauses: at least one map clause is required, its map-type must be given
// explicitly, and only the 'from', 'release', and 'delete' map types are
// accepted on this directive.  The expected-error comments below are matched
// by -verify and must stay on the same line as the pragma they annotate.
int main(int argc, char **argv) {
int r;
#pragma omp target exit data // expected-error {{expected at least one 'map' clause for '#pragma omp target exit data'}}
#pragma omp target exit data map(r) // expected-error {{map type must be specified for '#pragma omp target exit data'}}
#pragma omp target exit data map(tofrom: r) // expected-error {{map type 'tofrom' is not allowed for '#pragma omp target exit data'}}
#pragma omp target exit data map(always, from: r) allocate(r) // expected-error {{unexpected OpenMP clause 'allocate' in directive '#pragma omp target exit data'}}
#pragma omp target exit data map(delete: r)
#pragma omp target exit data map(release: r)
#pragma omp target exit data map(always, alloc: r) // expected-error {{map type 'alloc' is not allowed for '#pragma omp target exit data'}}
#pragma omp target exit data map(to: r) // expected-error {{map type 'to' is not allowed for '#pragma omp target exit data'}}
return 0;
}
|
GB_binop__bor_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bor_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__bor_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__bor_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__bor_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_int8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bor_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__bor_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_int8)
// C=scalar+B GB (_bind1st__bor_int8)
// C=scalar+B' GB (_bind1st_tran__bor_int8)
// C=A+scalar GB (_bind2nd__bor_int8)
// C=A'+scalar GB (_bind2nd_tran__bor_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = (aij) | (bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) | (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BOR || GxB_NO_INT8 || GxB_NO_BOR_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no mask, no accumulator.
// The loop body comes from the shared template, specialized for BOR/int8 by
// the GB_* macros defined at the top of this generated file.
void GB (_Cdense_ewise3_noaccum__bor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulating a sparse matrix B into a dense matrix C with the
// BOR int8 operator.  B is pre-sliced into B_ntasks tasks (B_ek_slicing).
GrB_Info GB (_Cdense_accumB__bor_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating a scalar b into a dense matrix C with BOR int8.
GrB_Info GB (_Cdense_accumb__bor_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable (the template returns above); generated-code artifact
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B with the BOR int8 operator.
// When is_eWiseUnion is true, alpha/beta supply the values used where an
// entry is present in only one of A or B.
GrB_Info GB (_AaddB__bor_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int8_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
// unwrap the typed eWiseUnion "fill" scalars
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hypersparse,
// using the BOR int8 operator (method 08 of GB_emult).
GrB_Info GB (_AemultB_08__bor_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hypersparse and B is bitmap/full
// (method 02 of GB_emult), using the BOR int8 operator.
GrB_Info GB (_AemultB_02__bor_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
// (BOR is commutative, so GB_BINOP_FLIP is 0 for this file.)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hypersparse and both A and B are
// bitmap/full (method 04 of GB_emult), using the BOR int8 operator.
GrB_Info GB (_AemultB_04__bor_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap,
// using the BOR int8 operator.
GrB_Info GB (_AemultB_bitmap__bor_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x | Bx [p] for every entry p of B: apply the BOR int8 operator
// with the scalar bound to the first argument.  Bb is B's bitmap (NULL when
// B is full); absent entries are skipped.  Cx and Bx may be aliased.
GrB_Info GB (_bind1st__bor_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int8_t x = (*((int8_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // only present entries of the bitmap participate
        if (GBB (Bb, k))
        {
            int8_t bij = GBX (Bx, k, false) ;
            Cx [k] = (x) | (bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] | y for every entry p of A: apply the BOR int8 operator
// with the scalar bound to the second argument.  Ab is A's bitmap (NULL when
// A is full); absent entries are skipped.  Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__bor_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only present entries of the bitmap participate
        if (GBB (Ab, k))
        {
            int8_t aij = GBX (Ax, k, false) ;
            Cx [k] = (aij) | (y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) | (aij) ; \
}
// C = op (x, A'): transpose A and apply the BOR int8 operator with the
// scalar bound to the first argument.  The per-entry work is done by the
// GB_CAST_OP macro defined just above, inside the transpose template.
GrB_Info GB (_bind1st_tran__bor_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of the file (generated-code artifact;
// both definitions are int8_t here)
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij) | (y) ; \
}
// C = op (A', y): transpose A and apply the BOR int8 operator with the
// scalar bound to the second argument.  The per-entry work is done by the
// GB_CAST_OP macro defined just above, inside the transpose template.
GrB_Info GB (_bind2nd_tran__bor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
MorseSmaleComplex3D.h | /// \ingroup base
/// \class ttk::MorseSmaleComplex3D
/// \author Guillaume Favelier <guillaume.favelier@lip6.fr>
/// \author Julien Tierny <julien.tierny@lip6.fr>
/// \date February 2017.
///
/// \brief TTK %morseSmaleComplex3D processing package.
///
/// %MorseSmaleComplex3D is a TTK processing package that computes the
/// Morse-Smale complex of a scalar field defined on a 3D triangulation.
///
/// \sa ttk::Triangulation
/// \sa ttkMorseSmaleComplex3D.cpp %for a usage example.
#ifndef _MORSESMALECOMPLEX3D_H
#define _MORSESMALECOMPLEX3D_H
// base code includes
#include<AbstractMorseSmaleComplex.h>
namespace ttk{
/**
* Class specialized in building the Morse-Smale complex
* of 3D triangulation.
*/
class MorseSmaleComplex3D : public AbstractMorseSmaleComplex{
public:
MorseSmaleComplex3D();
~MorseSmaleComplex3D();
/**
* Main function for computing the whole Morse-Smale complex.
*/
template<typename dataType, typename idtype>
int execute();
/**
* Compute the (saddle1, saddle2) pairs not detected by the
* contour tree.
*/
template<typename dataType, typename idType>
int computePersistencePairs(const std::vector<std::tuple<SimplexId, SimplexId, dataType>>& JTPairs,
const std::vector<std::tuple<SimplexId, SimplexId, dataType>>& STPairs,
std::vector<std::tuple<SimplexId, SimplexId, dataType>>& pl_saddleSaddlePairs);
/**
* Store the ascending and descending manifold ids for the
* given critical points into the output buffers.
*/
template <typename dataType>
int setAugmentedCriticalPoints(const std::vector<dcg::Cell>& criticalPoints,
SimplexId* ascendingManifold,
SimplexId* descendingManifold) const;
/**
* Compute the ascending 1-separatrices by reading into the discrete
* gradient.
*/
int getAscendingSeparatrices1(const std::vector<dcg::Cell>& criticalPoints,
std::vector<Separatrix>& separatrices,
std::vector<std::vector<dcg::Cell>>& separatricesGeometry) const;
/**
* Compute the saddle-connectors by reading into the discrete
* gradient.
*/
int getSaddleConnectors(const std::vector<dcg::Cell>& criticalPoints,
std::vector<Separatrix>& separatrices,
std::vector<std::vector<dcg::Cell>>& separatricesGeometry) const;
/**
* Compute the geometrical embedding of the saddle-connectors. This
* function needs the following internal pointers to be set:
* outputSeparatrices1_numberOfPoints_
* outputSeparatrices1_points_
* outputSeparatrices1_numberOfCells_
* outputSeparatrices1_cells_
* inputScalarField_
*/
template<typename dataType>
int setSaddleConnectors(const std::vector<Separatrix>& separatrices,
const std::vector<std::vector<dcg::Cell>>& separatricesGeometry) const;
/**
* Compute the 2-separatrices by reading into the discrete
* gradient from the maxima.
*/
int getDescendingSeparatrices2(const std::vector<dcg::Cell>& criticalPoints,
std::vector<Separatrix>& separatrices,
std::vector<std::vector<dcg::Cell>>& separatricesGeometry,
std::vector<std::set<SimplexId>>& separatricesSaddles) const;
/**
* Compute the geometrical embedding of the descending
* 2-separatrices. This function needs the following
* internal pointers to be set:
* outputSeparatrices2_numberOfPoints_
* outputSeparatrices2_points_
* outputSeparatrices2_numberOfCells_
* outputSeparatrices2_cells_
* inputScalarField_
*/
template<typename dataType>
int setDescendingSeparatrices2(const std::vector<Separatrix>& separatrices,
const std::vector<std::vector<dcg::Cell>>& separatricesGeometry,
const std::vector<std::set<SimplexId>>& separatricesSaddles) const;
#ifdef TTK_ENABLE_OPENMP
/**
* OpenMP-parallel counterpart of setDescendingSeparatrices2
* (same contract, same required internal pointers).
*/
template<typename dataType>
int omp_setDescendingSeparatrices2(const std::vector<Separatrix>& separatrices,
const std::vector<std::vector<dcg::Cell>>& separatricesGeometry,
const std::vector<std::set<SimplexId>>& separatricesSaddles) const;
#endif
/**
* Compute the dual polygon of the given edge: the list of the
* ids of the tetrahedra incident to that edge.
*/
int getDualPolygon(const SimplexId edgeId, std::vector<SimplexId>& polygon) const;
/**
* Reorder the vertices of a dual polygon into a consistent
* cyclic order (required before emitting it as an output cell).
*/
int sortDualPolygonVertices(std::vector<SimplexId>& polygon) const;
/**
* Compute the 2-separatrices by reading into the discrete
* gradient from the minima.
*/
int getAscendingSeparatrices2(const std::vector<dcg::Cell>& criticalPoints,
std::vector<Separatrix>& separatrices,
std::vector<std::vector<dcg::Cell>>& separatricesGeometry,
std::vector<std::set<SimplexId>>& separatricesSaddles) const;
/**
* Compute the geometrical embedding of the ascending
* 2-separatrices. This function needs the following
* internal pointers to be set:
* outputSeparatrices2_numberOfPoints_
* outputSeparatrices2_points_
* outputSeparatrices2_numberOfCells_
* outputSeparatrices2_cells_
* inputScalarField_
*/
template<typename dataType>
int setAscendingSeparatrices2(const std::vector<Separatrix>& separatrices,
const std::vector<std::vector<dcg::Cell>>& separatricesGeometry,
const std::vector<std::set<SimplexId>>& separatricesSaddles) const;
#ifdef TTK_ENABLE_OPENMP
/**
* OpenMP-parallel counterpart of setAscendingSeparatrices2
* (same contract, same required internal pointers).
*/
template<typename dataType>
int omp_setAscendingSeparatrices2(const std::vector<Separatrix>& separatrices,
const std::vector<std::vector<dcg::Cell>>& separatricesGeometry,
const std::vector<std::set<SimplexId>>& separatricesSaddles) const;
#endif
};
}
// Serialize the geometrical embedding of the saddle-connectors into the
// outputSeparatrices1_* buffers (points, cells and optional per-cell fields).
// Returns 0 on success, -1 (non-KAMIKAZE builds) if a required output
// pointer is null.
template<typename dataType>
int ttk::MorseSmaleComplex3D::setSaddleConnectors(const
std::vector<Separatrix>& separatrices,
const std::vector<std::vector<dcg::Cell>>& separatricesGeometry) const{
#ifndef TTK_ENABLE_KAMIKAZE
// sanity-check the internal pointers this function requires (see header)
if(!outputSeparatrices1_numberOfPoints_){
std::cerr << "[MorseSmaleComplex3D] 1-separatrices pointer to numberOfPoints is null." << std::endl;
return -1;
}
if(!outputSeparatrices1_points_){
std::cerr << "[MorseSmaleComplex3D] 1-separatrices pointer to points is null." << std::endl;
return -1;
}
if(!outputSeparatrices1_numberOfCells_){
std::cerr << "[MorseSmaleComplex3D] 1-separatrices pointer to numberOfCells is null." << std::endl;
return -1;
}
if(!outputSeparatrices1_cells_){
std::cerr << "[MorseSmaleComplex3D] 1-separatrices pointer to cells is null." << std::endl;
return -1;
}
if(!inputScalarField_){
std::cerr << "[MorseSmaleComplex3D] 1-separatrices pointer to the input scalar field is null." << std::endl;
return -1;
}
#endif
// input scalar values, viewed through the caller-provided dataType
const dataType* const scalars=static_cast<dataType*>(inputScalarField_);
// the per-cell function fields are stored type-erased; recover their
// concrete std::vector<dataType> type here (they may be null => optional)
std::vector<dataType>* outputSeparatrices1_cells_separatrixFunctionMaxima=
static_cast<std::vector<dataType>*>(outputSeparatrices1_cells_separatrixFunctionMaxima_);
std::vector<dataType>* outputSeparatrices1_cells_separatrixFunctionMinima=
static_cast<std::vector<dataType>*>(outputSeparatrices1_cells_separatrixFunctionMinima_);
std::vector<dataType>* outputSeparatrices1_cells_separatrixFunctionDiffs=
static_cast<std::vector<dataType>*>(outputSeparatrices1_cells_separatrixFunctionDiffs_);
// resume point/cell numbering after whatever is already in the outputs
SimplexId pointId=(*outputSeparatrices1_numberOfPoints_);
SimplexId cellId=(*outputSeparatrices1_numberOfCells_);
SimplexId separatrixId=0;
// continue separatrix ids after the largest id already emitted
if(outputSeparatrices1_cells_separatrixIds_ and
outputSeparatrices1_cells_separatrixIds_->size()){
separatrixId=*std::max_element(outputSeparatrices1_cells_separatrixIds_->begin(),
outputSeparatrices1_cells_separatrixIds_->end())+1;
}
for(const Separatrix& separatrix : separatrices){
// skip invalid or geometry-less separatrices
if(!separatrix.isValid_) continue;
if(!separatrix.geometry_.size()) continue;
const dcg::Cell& saddle1=separatrix.source_;
const dcg::Cell& saddle2=separatrix.destination_;
// get separatrix type : saddle-connector
const char separatrixType=1;
// compute separatrix function diff
const dataType separatrixFunctionMaximum=std::max(discreteGradient_.scalarMax<dataType>(saddle1, scalars),
discreteGradient_.scalarMax<dataType>(saddle2, scalars));
const dataType separatrixFunctionMinimum=std::min(discreteGradient_.scalarMin<dataType>(saddle1, scalars),
discreteGradient_.scalarMin<dataType>(saddle2, scalars));
const dataType separatrixFunctionDiff=separatrixFunctionMaximum-separatrixFunctionMinimum;
// get boundary condition
const char isOnBoundary=(discreteGradient_.isBoundary(saddle1) and discreteGradient_.isBoundary(saddle2));
bool isFirst=true;
for(const SimplexId geometryId : separatrix.geometry_){
// NOTE(review): oldPointId resets per geometry piece, so consecutive
// pieces of one separatrix are not joined by an edge — presumably
// intended (each piece is its own polyline); confirm with callers.
SimplexId oldPointId=-1;
for(auto cellIte=separatricesGeometry[geometryId].begin(); cellIte!=separatricesGeometry[geometryId].end(); ++cellIte){
const dcg::Cell& cell=*cellIte;
// one output point per cell of the geometry: the cell incenter
float point[3];
discreteGradient_.getCellIncenter(cell, point);
outputSeparatrices1_points_->push_back(point[0]);
outputSeparatrices1_points_->push_back(point[1]);
outputSeparatrices1_points_->push_back(point[2]);
if(outputSeparatrices1_points_smoothingMask_){
// mask endpoints (0) so smoothing leaves them fixed, interior points get 1
if(cellIte==separatricesGeometry[geometryId].begin() or
cellIte==separatricesGeometry[geometryId].end()-1)
outputSeparatrices1_points_smoothingMask_->push_back(0);
else
outputSeparatrices1_points_smoothingMask_->push_back(1);
}
if(outputSeparatrices1_points_cellDimensions_)
outputSeparatrices1_points_cellDimensions_->push_back(cell.dim_);
if(outputSeparatrices1_points_cellIds_)
outputSeparatrices1_points_cellIds_->push_back(cell.id_);
// from the second point onward, emit a 2-point line cell plus its fields
if(oldPointId!=-1){
outputSeparatrices1_cells_->push_back(2);
outputSeparatrices1_cells_->push_back(oldPointId);
outputSeparatrices1_cells_->push_back(pointId);
if(outputSeparatrices1_cells_sourceIds_)
outputSeparatrices1_cells_sourceIds_->push_back(saddle1.id_);
if(outputSeparatrices1_cells_destinationIds_)
outputSeparatrices1_cells_destinationIds_->push_back(saddle2.id_);
if(outputSeparatrices1_cells_separatrixIds_)
outputSeparatrices1_cells_separatrixIds_->push_back(separatrixId);
if(outputSeparatrices1_cells_separatrixTypes_)
outputSeparatrices1_cells_separatrixTypes_->push_back(separatrixType);
if(outputSeparatrices1_cells_separatrixFunctionMaxima)
outputSeparatrices1_cells_separatrixFunctionMaxima->push_back(separatrixFunctionMaximum);
if(outputSeparatrices1_cells_separatrixFunctionMinima)
outputSeparatrices1_cells_separatrixFunctionMinima->push_back(separatrixFunctionMinimum);
if(outputSeparatrices1_cells_separatrixFunctionDiffs)
outputSeparatrices1_cells_separatrixFunctionDiffs->push_back(separatrixFunctionDiff);
if(outputSeparatrices1_cells_isOnBoundary_)
outputSeparatrices1_cells_isOnBoundary_->push_back(isOnBoundary);
++cellId;
isFirst=false;
}
oldPointId=pointId;
++pointId;
}
}
// only consume a separatrix id if at least one cell was emitted
if(!isFirst)
++separatrixId;
}
// publish the final counters back to the caller-visible outputs
(*outputSeparatrices1_numberOfPoints_)=pointId;
(*outputSeparatrices1_numberOfCells_)=cellId;
return 0;
}
#ifdef TTK_ENABLE_OPENMP
// OpenMP-parallel embedding of the ascending 2-separatrices.
// Phase 1: each thread emits points/cells/fields into its own private
// buffers (indices local to the thread). Phase 2: a serial prefix-sum pass
// computes per-thread write offsets, the outputs are resized once, and a
// second parallel loop copies each thread's buffers into place, rebasing
// point indices and separatrix ids by the thread's offsets.
// Returns 0 on success, -1 (non-KAMIKAZE builds) on a null output pointer.
template<typename dataType>
int ttk::MorseSmaleComplex3D::omp_setAscendingSeparatrices2(const std::vector<Separatrix>& separatrices,
const std::vector<std::vector<dcg::Cell>>& separatricesGeometry,
const std::vector<std::set<SimplexId>>& separatricesSaddles) const{
#ifndef TTK_ENABLE_KAMIKAZE
// sanity-check the internal pointers this function requires (see header)
if(!outputSeparatrices2_numberOfPoints_){
std::cerr << "[MorseSmaleComplex3D] 2-separatrices pointer to numberOfPoints is null." << std::endl;
return -1;
}
if(!outputSeparatrices2_points_){
std::cerr << "[MorseSmaleComplex3D] 2-separatrices pointer to points is null." << std::endl;
return -1;
}
if(!outputSeparatrices2_numberOfCells_){
std::cerr << "[MorseSmaleComplex3D] 2-separatrices pointer to numberOfCells is null." << std::endl;
return -1;
}
if(!outputSeparatrices2_cells_){
std::cerr << "[MorseSmaleComplex3D] 2-separatrices pointer to cells is null." << std::endl;
return -1;
}
if(!inputScalarField_){
std::cerr << "[MorseSmaleComplex3D] 2-separatrices pointer to the input scalar field is null." << std::endl;
return -1;
}
#endif
// input scalar values, viewed through the caller-provided dataType
const dataType* const scalars=static_cast<dataType*>(inputScalarField_);
// recover the concrete type of the type-erased optional field buffers
std::vector<dataType>* outputSeparatrices2_cells_separatrixFunctionMaxima=
static_cast<std::vector<dataType>*>(outputSeparatrices2_cells_separatrixFunctionMaxima_);
std::vector<dataType>* outputSeparatrices2_cells_separatrixFunctionMinima=
static_cast<std::vector<dataType>*>(outputSeparatrices2_cells_separatrixFunctionMinima_);
std::vector<dataType>* outputSeparatrices2_cells_separatrixFunctionDiffs=
static_cast<std::vector<dataType>*>(outputSeparatrices2_cells_separatrixFunctionDiffs_);
// resume numbering after the points already in the output
SimplexId pointId=(*outputSeparatrices2_numberOfPoints_);
SimplexId separatrixId=0;
// continue separatrix ids after the largest id already emitted
if(outputSeparatrices2_cells_separatrixIds_ and
outputSeparatrices2_cells_separatrixIds_->size()){
separatrixId=*std::max_element(outputSeparatrices2_cells_separatrixIds_->begin(),
outputSeparatrices2_cells_separatrixIds_->end())+1;
}
// per-thread private buffers (indexed by omp thread id)
std::vector<SimplexId> separatrixIds(threadNumber_, 0);
std::vector<SimplexId> numberOfPoints(threadNumber_, 0);
std::vector<std::vector<float>> separatrices2_points(threadNumber_);
std::vector<SimplexId> numberOfCells(threadNumber_, 0);
std::vector<std::vector<SimplexId>> separatrices2_cells(threadNumber_);
std::vector<std::vector<SimplexId>> separatrices2_cells_sourceIds(threadNumber_);
std::vector<std::vector<SimplexId>> separatrices2_cells_separatrixIds(threadNumber_);
std::vector<std::vector<char>> separatrices2_cells_separatrixTypes(threadNumber_);
std::vector<std::vector<dataType>> separatrices2_cells_separatrixFunctionMaxima(threadNumber_);
std::vector<std::vector<dataType>> separatrices2_cells_separatrixFunctionMinima(threadNumber_);
std::vector<std::vector<dataType>> separatrices2_cells_separatrixFunctionDiffs(threadNumber_);
std::vector<std::vector<char>> separatrices2_cells_isOnBoundary(threadNumber_);
const SimplexId numberOfSeparatrices=separatrices.size();
// phase 1: parallel emission into thread-local buffers
#pragma omp parallel for num_threads(threadNumber_)
for(SimplexId i=0; i<numberOfSeparatrices; ++i){
const ThreadId threadId=omp_get_thread_num();
const Separatrix& separatrix=separatrices[i];
if(!separatrix.isValid_) continue;
if(!separatrix.geometry_.size()) continue;
const dcg::Cell& saddle=separatrix.source_;
const char separatrixType=1;
const SimplexId saddleId=saddle.id_;
// min comes from the source saddle; max is taken over the 2-saddles
// associated with this separatrix (loop below)
const dataType separatrixFunctionMinimum=discreteGradient_.scalarMin<dataType>(saddle, scalars);
dataType separatrixFunctionMaximum{};
// get separatrix infos
char isOnBoundary{};
bool isFirst=true;
for(const SimplexId saddle2Id : separatricesSaddles[i]){
// isOnBoundary counts how many associated 2-saddles lie on the boundary
if(inputTriangulation_->isTriangleOnBoundary(saddle2Id))
++isOnBoundary;
if(isFirst){
separatrixFunctionMaximum=discreteGradient_.scalarMax<dataType>(dcg::Cell(2,saddle2Id), scalars);
isFirst=false;
}
else{
separatrixFunctionMaximum=std::max(separatrixFunctionMaximum,
discreteGradient_.scalarMax<dataType>(dcg::Cell(2,saddle2Id), scalars));
}
}
const dataType separatrixFunctionDiff=separatrixFunctionMaximum-separatrixFunctionMinimum;
isFirst=true;
for(const SimplexId geometryId : separatrix.geometry_){
for(const dcg::Cell& edge : separatricesGeometry[geometryId]){
const SimplexId edgeId=edge.id_;
// Transform to dual : edge -> polygon
std::vector<SimplexId> polygon;
getDualPolygon(edgeId, polygon);
const SimplexId vertexNumber=polygon.size();
// degenerate dual polygons (<3 vertices) are dropped
if(vertexNumber>2){
sortDualPolygonVertices(polygon);
// add the polygon (cell layout: size, then thread-local point ids)
separatrices2_cells[threadId].push_back(vertexNumber);
for(SimplexId k=0; k<vertexNumber; ++k){
const SimplexId tetraId=polygon[k];
float point[3];
discreteGradient_.getCellIncenter(dcg::Cell(3,tetraId), point);
separatrices2_points[threadId].push_back(point[0]);
separatrices2_points[threadId].push_back(point[1]);
separatrices2_points[threadId].push_back(point[2]);
separatrices2_cells[threadId].push_back(numberOfPoints[threadId]);
++numberOfPoints[threadId];
}
if(outputSeparatrices2_cells_sourceIds_)
separatrices2_cells_sourceIds[threadId].push_back(saddleId);
if(outputSeparatrices2_cells_separatrixIds_)
separatrices2_cells_separatrixIds[threadId].push_back(separatrixIds[threadId]);
if(outputSeparatrices2_cells_separatrixTypes_)
separatrices2_cells_separatrixTypes[threadId].push_back(separatrixType);
if(outputSeparatrices2_cells_separatrixFunctionMaxima)
separatrices2_cells_separatrixFunctionMaxima[threadId].push_back(separatrixFunctionMaximum);
if(outputSeparatrices2_cells_separatrixFunctionMinima)
separatrices2_cells_separatrixFunctionMinima[threadId].push_back(separatrixFunctionMinimum);
if(outputSeparatrices2_cells_separatrixFunctionDiffs)
separatrices2_cells_separatrixFunctionDiffs[threadId].push_back(separatrixFunctionDiff);
if(outputSeparatrices2_cells_isOnBoundary_)
separatrices2_cells_isOnBoundary[threadId].push_back(isOnBoundary);
++numberOfCells[threadId];
isFirst=false;
}
}
}
// only consume a thread-local separatrix id if a cell was emitted
if(!isFirst)
++separatrixIds[threadId];
}
const SimplexId oldPointSize=outputSeparatrices2_points_->size();
const SimplexId oldCellSize=outputSeparatrices2_cells_->size();
const SimplexId oldFieldSize=outputSeparatrices2_cells_sourceIds_->size();
SimplexId totalNumberOfPoints=0;
SimplexId totalNumberOfCells=0;
{
// phase 2: serial prefix sums give each thread its write offsets
// (raw array offsets, point/cell-count offsets, separatrix-id offsets)
SimplexId npoints=0;
SimplexId ncells=0;
SimplexId nnpoints=pointId;
SimplexId nncells=0;
SimplexId tmp_separatrixId=separatrixId;
std::vector<SimplexId> offsetPoints(threadNumber_, 0);
std::vector<SimplexId> offsetCells(threadNumber_, 0);
std::vector<SimplexId> offsetNPoints(threadNumber_, 0);
std::vector<SimplexId> offsetNCells(threadNumber_, 0);
std::vector<SimplexId> offsetSeparatrixIds(threadNumber_, 0);
for(ThreadId i=0; i<threadNumber_; ++i){
offsetPoints[i]=npoints;
offsetCells[i]=ncells;
offsetNPoints[i]=nnpoints;
offsetNCells[i]=nncells;
offsetSeparatrixIds[i]=tmp_separatrixId;
npoints+=separatrices2_points[i].size();
ncells+=separatrices2_cells[i].size();
nnpoints+=numberOfPoints[i];
nncells+=numberOfCells[i];
tmp_separatrixId+=separatrixIds[i];
totalNumberOfPoints+=numberOfPoints[i];
totalNumberOfCells+=numberOfCells[i];
}
// single resize per output buffer, then lock-free parallel copy
outputSeparatrices2_points_->resize(oldPointSize+npoints);
outputSeparatrices2_cells_->resize(oldCellSize+ncells);
if(outputSeparatrices2_cells_sourceIds_)
outputSeparatrices2_cells_sourceIds_->resize(oldFieldSize+totalNumberOfCells);
if(outputSeparatrices2_cells_separatrixIds_)
outputSeparatrices2_cells_separatrixIds_->resize(oldFieldSize+totalNumberOfCells);
if(outputSeparatrices2_cells_separatrixTypes_)
outputSeparatrices2_cells_separatrixTypes_->resize(oldFieldSize+totalNumberOfCells);
if(outputSeparatrices2_cells_separatrixFunctionMaxima)
outputSeparatrices2_cells_separatrixFunctionMaxima->resize(oldFieldSize+totalNumberOfCells);
if(outputSeparatrices2_cells_separatrixFunctionMinima)
outputSeparatrices2_cells_separatrixFunctionMinima->resize(oldFieldSize+totalNumberOfCells);
if(outputSeparatrices2_cells_separatrixFunctionDiffs)
outputSeparatrices2_cells_separatrixFunctionDiffs->resize(oldFieldSize+totalNumberOfCells);
if(outputSeparatrices2_cells_isOnBoundary_)
outputSeparatrices2_cells_isOnBoundary_->resize(oldFieldSize+totalNumberOfCells);
#pragma omp parallel for num_threads(threadNumber_)
for(ThreadId i=0; i<threadNumber_; ++i){
// reduce: points
const SimplexId tmp_npoints=separatrices2_points[i].size();
const SimplexId tmp_offsetPoints=offsetPoints[i];
for(SimplexId j=0; j<tmp_npoints; ++j)
(*outputSeparatrices2_points_)[oldPointSize+tmp_offsetPoints+j]=separatrices2_points[i][j];
// reduce: cells
const SimplexId tmp_ncells=separatrices2_cells[i].size();
const SimplexId tmp_offsetCells=offsetCells[i];
for(SimplexId j=0; j<tmp_ncells;){
// cell layout is [size, id0..id(size-1)]; rebase the thread-local
// point ids by the thread's global point offset
const SimplexId cellSize=separatrices2_cells[i][j];
(*outputSeparatrices2_cells_)[oldCellSize+tmp_offsetCells+j]=cellSize;
for(SimplexId k=0; k<cellSize; ++k){
const SimplexId tmp_pointId=separatrices2_cells[i][j+k+1];
(*outputSeparatrices2_cells_)[oldCellSize+tmp_offsetCells+j+k+1]=offsetNPoints[i]+tmp_pointId;
}
j+=(cellSize+1);
}
// reduce: fields
for(SimplexId j=0; j<numberOfCells[i]; ++j){
if(outputSeparatrices2_cells_sourceIds_)
(*outputSeparatrices2_cells_sourceIds_)[oldFieldSize+offsetNCells[i]+j]=
separatrices2_cells_sourceIds[i][j];
if(outputSeparatrices2_cells_separatrixIds_)
(*outputSeparatrices2_cells_separatrixIds_)[oldFieldSize+offsetNCells[i]+j]=
offsetSeparatrixIds[i]+separatrices2_cells_separatrixIds[i][j];
if(outputSeparatrices2_cells_separatrixTypes_)
(*outputSeparatrices2_cells_separatrixTypes_)[oldFieldSize+offsetNCells[i]+j]=
separatrices2_cells_separatrixTypes[i][j];
if(outputSeparatrices2_cells_separatrixFunctionMaxima)
(*outputSeparatrices2_cells_separatrixFunctionMaxima)[oldFieldSize+offsetNCells[i]+j]=
separatrices2_cells_separatrixFunctionMaxima[i][j];
if(outputSeparatrices2_cells_separatrixFunctionMinima)
(*outputSeparatrices2_cells_separatrixFunctionMinima)[oldFieldSize+offsetNCells[i]+j]=
separatrices2_cells_separatrixFunctionMinima[i][j];
if(outputSeparatrices2_cells_separatrixFunctionDiffs)
(*outputSeparatrices2_cells_separatrixFunctionDiffs)[oldFieldSize+offsetNCells[i]+j]=
separatrices2_cells_separatrixFunctionDiffs[i][j];
if(outputSeparatrices2_cells_isOnBoundary_)
(*outputSeparatrices2_cells_isOnBoundary_)[oldFieldSize+offsetNCells[i]+j]=
separatrices2_cells_isOnBoundary[i][j];
}
}
}
// publish the updated totals to the caller-visible counters
(*outputSeparatrices2_numberOfPoints_)+=totalNumberOfPoints;
(*outputSeparatrices2_numberOfCells_)+=totalNumberOfCells;
return 0;
}
#endif
// Serial embedding of the ascending 2-separatrices into the
// outputSeparatrices2_* buffers. Unlike the OpenMP variant, tetra incenter
// points are deduplicated across cells via the isVisited table.
// Returns 0 on success, -1 (non-KAMIKAZE builds) on a null output pointer.
template<typename dataType>
int ttk::MorseSmaleComplex3D::setAscendingSeparatrices2(const std::vector<Separatrix>& separatrices,
const std::vector<std::vector<dcg::Cell>>& separatricesGeometry,
const std::vector<std::set<SimplexId>>& separatricesSaddles) const{
#ifndef TTK_ENABLE_KAMIKAZE
// sanity-check the internal pointers this function requires (see header)
if(!outputSeparatrices2_numberOfPoints_){
std::cerr << "[MorseSmaleComplex3D] 2-separatrices pointer to numberOfPoints is null." << std::endl;
return -1;
}
if(!outputSeparatrices2_points_){
std::cerr << "[MorseSmaleComplex3D] 2-separatrices pointer to points is null." << std::endl;
return -1;
}
if(!outputSeparatrices2_numberOfCells_){
std::cerr << "[MorseSmaleComplex3D] 2-separatrices pointer to numberOfCells is null." << std::endl;
return -1;
}
if(!outputSeparatrices2_cells_){
std::cerr << "[MorseSmaleComplex3D] 2-separatrices pointer to cells is null." << std::endl;
return -1;
}
if(!inputScalarField_){
std::cerr << "[MorseSmaleComplex3D] 2-separatrices pointer to the input scalar field is null." << std::endl;
return -1;
}
#endif
// input scalar values, viewed through the caller-provided dataType
const dataType* const scalars=static_cast<dataType*>(inputScalarField_);
// recover the concrete type of the type-erased optional field buffers
std::vector<dataType>* outputSeparatrices2_cells_separatrixFunctionMaxima=
static_cast<std::vector<dataType>*>(outputSeparatrices2_cells_separatrixFunctionMaxima_);
std::vector<dataType>* outputSeparatrices2_cells_separatrixFunctionMinima=
static_cast<std::vector<dataType>*>(outputSeparatrices2_cells_separatrixFunctionMinima_);
std::vector<dataType>* outputSeparatrices2_cells_separatrixFunctionDiffs=
static_cast<std::vector<dataType>*>(outputSeparatrices2_cells_separatrixFunctionDiffs_);
// resume point/cell numbering after whatever is already in the outputs
SimplexId pointId=(*outputSeparatrices2_numberOfPoints_);
SimplexId cellId=(*outputSeparatrices2_numberOfCells_);
SimplexId separatrixId=0;
// continue separatrix ids after the largest id already emitted
if(outputSeparatrices2_cells_separatrixIds_ and
outputSeparatrices2_cells_separatrixIds_->size()){
separatrixId=*std::max_element(outputSeparatrices2_cells_separatrixIds_->begin(),
outputSeparatrices2_cells_separatrixIds_->end())+1;
}
// isVisited[tetraId] == -1 while unseen, else the output point id already
// assigned to that tetra's incenter (point deduplication)
const SimplexId numberOfCells=inputTriangulation_->getNumberOfCells();
std::vector<SimplexId> isVisited(numberOfCells, -1);
const SimplexId numberOfSeparatrices=separatrices.size();
for(SimplexId i=0; i<numberOfSeparatrices; ++i){
const Separatrix& separatrix=separatrices[i];
if(!separatrix.isValid_) continue;
if(!separatrix.geometry_.size()) continue;
const dcg::Cell& saddle=separatrix.source_;
const char separatrixType=1;
const SimplexId saddleId=saddle.id_;
// min from the source saddle; max over the associated 2-saddles below
const dataType separatrixFunctionMinimum=discreteGradient_.scalarMin<dataType>(saddle, scalars);
dataType separatrixFunctionMaximum{};
// get separatrix infos
char isOnBoundary{};
bool isFirst=true;
for(const SimplexId saddle2Id : separatricesSaddles[i]){
// isOnBoundary counts the associated 2-saddles on the domain boundary
if(inputTriangulation_->isTriangleOnBoundary(saddle2Id))
++isOnBoundary;
if(isFirst){
separatrixFunctionMaximum=discreteGradient_.scalarMax<dataType>(dcg::Cell(2,saddle2Id), scalars);
isFirst=false;
}
else{
separatrixFunctionMaximum=std::max(separatrixFunctionMaximum,
discreteGradient_.scalarMax<dataType>(dcg::Cell(2,saddle2Id), scalars));
}
}
const dataType separatrixFunctionDiff=separatrixFunctionMaximum-separatrixFunctionMinimum;
isFirst=true;
for(const SimplexId geometryId : separatrix.geometry_){
for(const dcg::Cell& edge : separatricesGeometry[geometryId]){
const SimplexId edgeId=edge.id_;
// Transform to dual : edge -> polygon
std::vector<SimplexId> polygon;
getDualPolygon(edgeId, polygon);
const SimplexId vertexNumber=polygon.size();
// degenerate dual polygons (<3 vertices) are dropped
if(vertexNumber>2){
sortDualPolygonVertices(polygon);
// add the polygon (cell layout: size, then point ids)
outputSeparatrices2_cells_->push_back(vertexNumber);
float point[3];
// NOTE(review): this inner `i` shadows the outer separatrix index
// `i` — harmless here since the outer value is not used inside,
// but worth renaming.
for(SimplexId i=0; i<vertexNumber; ++i){
const SimplexId tetraId=polygon[i];
discreteGradient_.getCellIncenter(dcg::Cell(3,tetraId), point);
if(isVisited[tetraId]==-1){
// first time this tetra is seen: emit its incenter as a new point
outputSeparatrices2_points_->push_back(point[0]);
outputSeparatrices2_points_->push_back(point[1]);
outputSeparatrices2_points_->push_back(point[2]);
outputSeparatrices2_cells_->push_back(pointId);
isVisited[tetraId]=pointId;
++pointId;
}
else
outputSeparatrices2_cells_->push_back(isVisited[tetraId]);
}
if(outputSeparatrices2_cells_sourceIds_)
outputSeparatrices2_cells_sourceIds_->push_back(saddleId);
if(outputSeparatrices2_cells_separatrixIds_)
outputSeparatrices2_cells_separatrixIds_->push_back(separatrixId);
if(outputSeparatrices2_cells_separatrixTypes_)
outputSeparatrices2_cells_separatrixTypes_->push_back(separatrixType);
if(outputSeparatrices2_cells_separatrixFunctionMaxima)
outputSeparatrices2_cells_separatrixFunctionMaxima->push_back(separatrixFunctionMaximum);
if(outputSeparatrices2_cells_separatrixFunctionMinima)
outputSeparatrices2_cells_separatrixFunctionMinima->push_back(separatrixFunctionMinimum);
if(outputSeparatrices2_cells_separatrixFunctionDiffs)
outputSeparatrices2_cells_separatrixFunctionDiffs->push_back(separatrixFunctionDiff);
if(outputSeparatrices2_cells_isOnBoundary_)
outputSeparatrices2_cells_isOnBoundary_->push_back(isOnBoundary);
++cellId;
isFirst=false;
}
}
}
// only consume a separatrix id if at least one cell was emitted
if(!isFirst)
++separatrixId;
}
// publish the final counters back to the caller-visible outputs
(*outputSeparatrices2_numberOfPoints_)=pointId;
(*outputSeparatrices2_numberOfCells_)=cellId;
return 0;
}
#ifdef TTK_ENABLE_OPENMP
// OpenMP-parallel embedding of the descending 2-separatrices (triangles).
// Same two-phase scheme as omp_setAscendingSeparatrices2: parallel emission
// into thread-local buffers, then a serial prefix-sum of per-thread offsets
// followed by a parallel copy into the (pre-resized) shared outputs.
// Returns 0 on success, -1 (non-KAMIKAZE builds) on a null output pointer.
template<typename dataType>
int ttk::MorseSmaleComplex3D::omp_setDescendingSeparatrices2(const std::vector<Separatrix>& separatrices,
const std::vector<std::vector<dcg::Cell>>& separatricesGeometry,
const std::vector<std::set<SimplexId>>& separatricesSaddles) const{
#ifndef TTK_ENABLE_KAMIKAZE
// sanity-check the internal pointers this function requires (see header)
if(!outputSeparatrices2_numberOfPoints_){
std::cerr << "[MorseSmaleComplex3D] 2-separatrices pointer to numberOfPoints is null." << std::endl;
return -1;
}
if(!outputSeparatrices2_points_){
std::cerr << "[MorseSmaleComplex3D] 2-separatrices pointer to points is null." << std::endl;
return -1;
}
if(!outputSeparatrices2_numberOfCells_){
std::cerr << "[MorseSmaleComplex3D] 2-separatrices pointer to numberOfCells is null." << std::endl;
return -1;
}
if(!outputSeparatrices2_cells_){
std::cerr << "[MorseSmaleComplex3D] 2-separatrices pointer to cells is null." << std::endl;
return -1;
}
if(!inputScalarField_){
std::cerr << "[MorseSmaleComplex3D] 2-separatrices pointer to the input scalar field is null." << std::endl;
return -1;
}
#endif
// input scalar values, viewed through the caller-provided dataType
const dataType* const scalars=static_cast<dataType*>(inputScalarField_);
// recover the concrete type of the type-erased optional field buffers
std::vector<dataType>* outputSeparatrices2_cells_separatrixFunctionMaxima=
static_cast<std::vector<dataType>*>(outputSeparatrices2_cells_separatrixFunctionMaxima_);
std::vector<dataType>* outputSeparatrices2_cells_separatrixFunctionMinima=
static_cast<std::vector<dataType>*>(outputSeparatrices2_cells_separatrixFunctionMinima_);
std::vector<dataType>* outputSeparatrices2_cells_separatrixFunctionDiffs=
static_cast<std::vector<dataType>*>(outputSeparatrices2_cells_separatrixFunctionDiffs_);
// resume numbering after the points already in the output
SimplexId pointId=(*outputSeparatrices2_numberOfPoints_);
SimplexId separatrixId=0;
// continue separatrix ids after the largest id already emitted
if(outputSeparatrices2_cells_separatrixIds_ and
outputSeparatrices2_cells_separatrixIds_->size()){
separatrixId=*std::max_element(outputSeparatrices2_cells_separatrixIds_->begin(),
outputSeparatrices2_cells_separatrixIds_->end())+1;
}
// per-thread private buffers (indexed by omp thread id)
std::vector<SimplexId> separatrixIds(threadNumber_, 0);
std::vector<SimplexId> numberOfPoints(threadNumber_, 0);
std::vector<std::vector<float>> separatrices2_points(threadNumber_);
std::vector<SimplexId> numberOfCells(threadNumber_, 0);
std::vector<std::vector<SimplexId>> separatrices2_cells(threadNumber_);
std::vector<std::vector<SimplexId>> separatrices2_cells_sourceIds(threadNumber_);
std::vector<std::vector<SimplexId>> separatrices2_cells_separatrixIds(threadNumber_);
std::vector<std::vector<char>> separatrices2_cells_separatrixTypes(threadNumber_);
std::vector<std::vector<dataType>> separatrices2_cells_separatrixFunctionMaxima(threadNumber_);
std::vector<std::vector<dataType>> separatrices2_cells_separatrixFunctionMinima(threadNumber_);
std::vector<std::vector<dataType>> separatrices2_cells_separatrixFunctionDiffs(threadNumber_);
std::vector<std::vector<char>> separatrices2_cells_isOnBoundary(threadNumber_);
const SimplexId numberOfSeparatrices=separatrices.size();
// phase 1: parallel emission into thread-local buffers
#pragma omp parallel for num_threads(threadNumber_)
for(SimplexId i=0; i<numberOfSeparatrices; ++i){
const ThreadId threadId=omp_get_thread_num();
const Separatrix& separatrix=separatrices[i];
if(!separatrix.isValid_) continue;
if(!separatrix.geometry_.size()) continue;
const dcg::Cell& saddle=separatrix.source_;
const char separatrixType=2;
const SimplexId saddleId=saddle.id_;
// max comes from the source saddle; min is taken over the 1-saddles
// associated with this separatrix (loop below)
const dataType separatrixFunctionMaximum=discreteGradient_.scalarMax<dataType>(saddle, scalars);
dataType separatrixFunctionMinimum{};
// get separatrix infos
char isOnBoundary{};
bool isFirst=true;
for(const SimplexId saddle1Id : separatricesSaddles[i]){
// isOnBoundary counts the associated 1-saddles on the domain boundary
if(inputTriangulation_->isEdgeOnBoundary(saddle1Id))
++isOnBoundary;
if(isFirst){
separatrixFunctionMinimum=discreteGradient_.scalarMin<dataType>(dcg::Cell(1,saddle1Id), scalars);
isFirst=false;
}
else{
separatrixFunctionMinimum=std::min(separatrixFunctionMinimum,
discreteGradient_.scalarMin<dataType>(dcg::Cell(1,saddle1Id), scalars));
}
}
const dataType separatrixFunctionDiff=separatrixFunctionMaximum-separatrixFunctionMinimum;
isFirst=true;
for(const SimplexId geometryId : separatrix.geometry_){
for(const dcg::Cell& cell : separatricesGeometry[geometryId]){
// each geometry cell is a triangle; emit its 3 vertices as new
// points (no deduplication in the parallel variant)
const SimplexId triangleId=cell.id_;
separatrices2_cells[threadId].push_back(3);
for(int k=0; k<3; ++k){
SimplexId vertexId;
inputTriangulation_->getTriangleVertex(triangleId, k, vertexId);
float point[3];
inputTriangulation_->getVertexPoint(vertexId, point[0], point[1], point[2]);
separatrices2_points[threadId].push_back(point[0]);
separatrices2_points[threadId].push_back(point[1]);
separatrices2_points[threadId].push_back(point[2]);
separatrices2_cells[threadId].push_back(numberOfPoints[threadId]);
++numberOfPoints[threadId];
}
if(outputSeparatrices2_cells_sourceIds_)
separatrices2_cells_sourceIds[threadId].push_back(saddleId);
if(outputSeparatrices2_cells_separatrixIds_)
separatrices2_cells_separatrixIds[threadId].push_back(separatrixIds[threadId]);
if(outputSeparatrices2_cells_separatrixTypes_)
separatrices2_cells_separatrixTypes[threadId].push_back(separatrixType);
if(outputSeparatrices2_cells_separatrixFunctionMaxima)
separatrices2_cells_separatrixFunctionMaxima[threadId].push_back(separatrixFunctionMaximum);
if(outputSeparatrices2_cells_separatrixFunctionMinima)
separatrices2_cells_separatrixFunctionMinima[threadId].push_back(separatrixFunctionMinimum);
if(outputSeparatrices2_cells_separatrixFunctionDiffs)
separatrices2_cells_separatrixFunctionDiffs[threadId].push_back(separatrixFunctionDiff);
if(outputSeparatrices2_cells_isOnBoundary_)
separatrices2_cells_isOnBoundary[threadId].push_back(isOnBoundary);
++numberOfCells[threadId];
isFirst=false;
}
}
// only consume a thread-local separatrix id if a cell was emitted
if(!isFirst)
++separatrixIds[threadId];
}
const SimplexId oldPointSize=outputSeparatrices2_points_->size();
const SimplexId oldCellSize=outputSeparatrices2_cells_->size();
const SimplexId oldFieldSize=outputSeparatrices2_cells_sourceIds_->size();
SimplexId totalNumberOfPoints=0;
SimplexId totalNumberOfCells=0;
{
// phase 2: serial prefix sums give each thread its write offsets
// (raw array offsets, point/cell-count offsets, separatrix-id offsets)
SimplexId npoints=0;
SimplexId ncells=0;
SimplexId nnpoints=pointId;
SimplexId nncells=0;
SimplexId tmp_separatrixId=separatrixId;
std::vector<SimplexId> offsetPoints(threadNumber_, 0);
std::vector<SimplexId> offsetCells(threadNumber_, 0);
std::vector<SimplexId> offsetNPoints(threadNumber_, 0);
std::vector<SimplexId> offsetNCells(threadNumber_, 0);
std::vector<SimplexId> offsetSeparatrixIds(threadNumber_, 0);
for(ThreadId i=0; i<threadNumber_; ++i){
offsetPoints[i]=npoints;
offsetCells[i]=ncells;
offsetNPoints[i]=nnpoints;
offsetNCells[i]=nncells;
offsetSeparatrixIds[i]=tmp_separatrixId;
npoints+=separatrices2_points[i].size();
ncells+=separatrices2_cells[i].size();
nnpoints+=numberOfPoints[i];
nncells+=numberOfCells[i];
tmp_separatrixId+=separatrixIds[i];
totalNumberOfPoints+=numberOfPoints[i];
totalNumberOfCells+=numberOfCells[i];
}
// single resize per output buffer, then lock-free parallel copy
outputSeparatrices2_points_->resize(oldPointSize+npoints);
outputSeparatrices2_cells_->resize(oldCellSize+ncells);
if(outputSeparatrices2_cells_sourceIds_)
outputSeparatrices2_cells_sourceIds_->resize(oldFieldSize+totalNumberOfCells);
if(outputSeparatrices2_cells_separatrixIds_)
outputSeparatrices2_cells_separatrixIds_->resize(oldFieldSize+totalNumberOfCells);
if(outputSeparatrices2_cells_separatrixTypes_)
outputSeparatrices2_cells_separatrixTypes_->resize(oldFieldSize+totalNumberOfCells);
if(outputSeparatrices2_cells_separatrixFunctionMaxima)
outputSeparatrices2_cells_separatrixFunctionMaxima->resize(oldFieldSize+totalNumberOfCells);
if(outputSeparatrices2_cells_separatrixFunctionMinima)
outputSeparatrices2_cells_separatrixFunctionMinima->resize(oldFieldSize+totalNumberOfCells);
if(outputSeparatrices2_cells_separatrixFunctionDiffs)
outputSeparatrices2_cells_separatrixFunctionDiffs->resize(oldFieldSize+totalNumberOfCells);
if(outputSeparatrices2_cells_isOnBoundary_)
outputSeparatrices2_cells_isOnBoundary_->resize(oldFieldSize+totalNumberOfCells);
#pragma omp parallel for num_threads(threadNumber_)
for(ThreadId i=0; i<threadNumber_; ++i){
// reduce: points
const SimplexId tmp_npoints=separatrices2_points[i].size();
const SimplexId tmp_offsetPoints=offsetPoints[i];
for(SimplexId j=0; j<tmp_npoints; ++j)
(*outputSeparatrices2_points_)[oldPointSize+tmp_offsetPoints+j]=separatrices2_points[i][j];
// reduce: cells
const SimplexId tmp_ncells=separatrices2_cells[i].size();
const SimplexId tmp_offsetCells=offsetCells[i];
for(SimplexId j=0; j<tmp_ncells;){
// cell layout is [size, id0..id(size-1)]; rebase the thread-local
// point ids by the thread's global point offset
const SimplexId cellSize=separatrices2_cells[i][j];
(*outputSeparatrices2_cells_)[oldCellSize+tmp_offsetCells+j]=cellSize;
for(SimplexId k=0; k<cellSize; ++k){
const SimplexId tmp_pointId=separatrices2_cells[i][j+k+1];
(*outputSeparatrices2_cells_)[oldCellSize+tmp_offsetCells+j+k+1]=offsetNPoints[i]+tmp_pointId;
}
j+=(cellSize+1);
}
// reduce: fields
for(SimplexId j=0; j<numberOfCells[i]; ++j){
if(outputSeparatrices2_cells_sourceIds_)
(*outputSeparatrices2_cells_sourceIds_)[oldFieldSize+offsetNCells[i]+j]=
separatrices2_cells_sourceIds[i][j];
if(outputSeparatrices2_cells_separatrixIds_)
(*outputSeparatrices2_cells_separatrixIds_)[oldFieldSize+offsetNCells[i]+j]=
offsetSeparatrixIds[i]+separatrices2_cells_separatrixIds[i][j];
if(outputSeparatrices2_cells_separatrixTypes_)
(*outputSeparatrices2_cells_separatrixTypes_)[oldFieldSize+offsetNCells[i]+j]=
separatrices2_cells_separatrixTypes[i][j];
if(outputSeparatrices2_cells_separatrixFunctionMaxima)
(*outputSeparatrices2_cells_separatrixFunctionMaxima)[oldFieldSize+offsetNCells[i]+j]=
separatrices2_cells_separatrixFunctionMaxima[i][j];
if(outputSeparatrices2_cells_separatrixFunctionMinima)
(*outputSeparatrices2_cells_separatrixFunctionMinima)[oldFieldSize+offsetNCells[i]+j]=
separatrices2_cells_separatrixFunctionMinima[i][j];
if(outputSeparatrices2_cells_separatrixFunctionDiffs)
(*outputSeparatrices2_cells_separatrixFunctionDiffs)[oldFieldSize+offsetNCells[i]+j]=
separatrices2_cells_separatrixFunctionDiffs[i][j];
if(outputSeparatrices2_cells_isOnBoundary_)
(*outputSeparatrices2_cells_isOnBoundary_)[oldFieldSize+offsetNCells[i]+j]=
separatrices2_cells_isOnBoundary[i][j];
}
}
}
// publish the updated totals to the caller-visible counters
(*outputSeparatrices2_numberOfPoints_)+=totalNumberOfPoints;
(*outputSeparatrices2_numberOfCells_)+=totalNumberOfCells;
return 0;
}
#endif
// Serializes the descending 2-separatrices (walls attached to the 2-saddles)
// into the flat output arrays: point coordinates, cell connectivity and the
// optional per-cell attribute arrays. Sequential counterpart of the OpenMP
// variant above; appends after any data already present in the outputs.
// Returns 0 on success, -1 on null output/input pointers (non-KAMIKAZE).
template<typename dataType>
int ttk::MorseSmaleComplex3D::setDescendingSeparatrices2(const std::vector<Separatrix>& separatrices,
const std::vector<std::vector<dcg::Cell>>& separatricesGeometry,
const std::vector<std::set<SimplexId>>& separatricesSaddles) const{
#ifndef TTK_ENABLE_KAMIKAZE
// sanity checks on the mandatory output/input pointers
if(!outputSeparatrices2_numberOfPoints_){
std::cerr << "[MorseSmaleComplex3D] 2-separatrices pointer to numberOfPoints is null." << std::endl;
return -1;
}
if(!outputSeparatrices2_points_){
std::cerr << "[MorseSmaleComplex3D] 2-separatrices pointer to points is null." << std::endl;
return -1;
}
if(!outputSeparatrices2_numberOfCells_){
std::cerr << "[MorseSmaleComplex3D] 2-separatrices pointer to numberOfCells is null." << std::endl;
return -1;
}
if(!outputSeparatrices2_cells_){
std::cerr << "[MorseSmaleComplex3D] 2-separatrices pointer to cells is null." << std::endl;
return -1;
}
if(!inputScalarField_){
std::cerr << "[MorseSmaleComplex3D] 2-separatrices pointer to the input scalar field is null." << std::endl;
return -1;
}
#endif
const dataType* const scalars=static_cast<dataType*>(inputScalarField_);
// optional per-cell scalar attribute arrays (each may be null when not requested)
std::vector<dataType>* outputSeparatrices2_cells_separatrixFunctionMaxima=
static_cast<std::vector<dataType>*>(outputSeparatrices2_cells_separatrixFunctionMaxima_);
std::vector<dataType>* outputSeparatrices2_cells_separatrixFunctionMinima=
static_cast<std::vector<dataType>*>(outputSeparatrices2_cells_separatrixFunctionMinima_);
std::vector<dataType>* outputSeparatrices2_cells_separatrixFunctionDiffs=
static_cast<std::vector<dataType>*>(outputSeparatrices2_cells_separatrixFunctionDiffs_);
// continue numbering after whatever is already stored in the outputs
SimplexId pointId=(*outputSeparatrices2_numberOfPoints_);
SimplexId cellId=(*outputSeparatrices2_numberOfCells_);
SimplexId separatrixId=0;
if(outputSeparatrices2_cells_separatrixIds_ and
outputSeparatrices2_cells_separatrixIds_->size()){
separatrixId=*std::max_element(outputSeparatrices2_cells_separatrixIds_->begin(),
outputSeparatrices2_cells_separatrixIds_->end())+1;
}
const SimplexId numberOfVertices=inputTriangulation_->getNumberOfVertices();
// isVisited[v] is -1 until vertex v is emitted, then holds its output point
// id (deduplicates points shared by adjacent triangles)
std::vector<SimplexId> isVisited(numberOfVertices, -1);
const SimplexId numberOfSeparatrices=separatrices.size();
for(SimplexId i=0; i<numberOfSeparatrices; ++i){
const Separatrix& separatrix=separatrices[i];
if(!separatrix.isValid_) continue;
if(!separatrix.geometry_.size()) continue;
const dcg::Cell& saddle=separatrix.source_;
const char separatrixType=2;
const SimplexId saddleId=saddle.id_;
// function extrema along the wall: max at the source 2-saddle, min over the
// 1-saddles associated with this separatrix
const dataType separatrixFunctionMaximum=discreteGradient_.scalarMax<dataType>(saddle, scalars);
dataType separatrixFunctionMinimum{};
// get separatrix infos
char isOnBoundary{};
bool isFirst=true;
for(const SimplexId saddle1Id : separatricesSaddles[i]){
if(inputTriangulation_->isEdgeOnBoundary(saddle1Id))
++isOnBoundary;
if(isFirst){
separatrixFunctionMinimum=discreteGradient_.scalarMin<dataType>(dcg::Cell(1,saddle1Id), scalars);
isFirst=false;
}
else{
separatrixFunctionMinimum=std::min(separatrixFunctionMinimum,
discreteGradient_.scalarMin<dataType>(dcg::Cell(1,saddle1Id), scalars));
}
}
const dataType separatrixFunctionDiff=separatrixFunctionMaximum-separatrixFunctionMinimum;
// isFirst is reused below to track whether this separatrix emitted any cell
isFirst=true;
for(const SimplexId geometryId : separatrix.geometry_){
for(const dcg::Cell& cell : separatricesGeometry[geometryId]){
const SimplexId triangleId=cell.id_;
// each output cell is a triangle: a size tag (3) followed by 3 point ids
outputSeparatrices2_cells_->push_back(3);
float point[3];
for(int k=0; k<3; ++k){
SimplexId vertexId;
inputTriangulation_->getTriangleVertex(triangleId, k, vertexId);
if(isVisited[vertexId]==-1){
// first occurrence of this vertex: emit its coordinates
inputTriangulation_->getVertexPoint(vertexId, point[0], point[1], point[2]);
outputSeparatrices2_points_->push_back(point[0]);
outputSeparatrices2_points_->push_back(point[1]);
outputSeparatrices2_points_->push_back(point[2]);
outputSeparatrices2_cells_->push_back(pointId);
isVisited[vertexId]=pointId;
++pointId;
}
else
outputSeparatrices2_cells_->push_back(isVisited[vertexId]);
}
// per-cell attributes, filled only when the matching array was requested
if(outputSeparatrices2_cells_sourceIds_)
outputSeparatrices2_cells_sourceIds_->push_back(saddleId);
if(outputSeparatrices2_cells_separatrixIds_)
outputSeparatrices2_cells_separatrixIds_->push_back(separatrixId);
if(outputSeparatrices2_cells_separatrixTypes_)
outputSeparatrices2_cells_separatrixTypes_->push_back(separatrixType);
if(outputSeparatrices2_cells_separatrixFunctionMaxima)
outputSeparatrices2_cells_separatrixFunctionMaxima->push_back(separatrixFunctionMaximum);
if(outputSeparatrices2_cells_separatrixFunctionMinima)
outputSeparatrices2_cells_separatrixFunctionMinima->push_back(separatrixFunctionMinimum);
if(outputSeparatrices2_cells_separatrixFunctionDiffs)
outputSeparatrices2_cells_separatrixFunctionDiffs->push_back(separatrixFunctionDiff);
if(outputSeparatrices2_cells_isOnBoundary_)
outputSeparatrices2_cells_isOnBoundary_->push_back(isOnBoundary);
++cellId;
isFirst=false;
}
}
// only consume a separatrix id if this separatrix actually produced geometry
if(!isFirst)
++separatrixId;
}
(*outputSeparatrices2_numberOfPoints_)=pointId;
(*outputSeparatrices2_numberOfCells_)=cellId;
return 0;
}
// Full 3D Morse-Smale complex pipeline: builds and simplifies the discrete
// gradient, extracts 1- and 2-separatrices (as enabled by the Compute*
// flags), computes the ascending/descending/final segmentations and outputs
// the critical points. Returns 0 on success, -1 on null inputs.
template<typename dataType, typename idType>
int ttk::MorseSmaleComplex3D::execute(){
#ifndef TTK_ENABLE_KAMIKAZE
if(!inputScalarField_){
std::cerr << "[MorseSmaleComplex3D] Error: input scalar field pointer is null." << std::endl;
return -1;
}
if(!inputOffsets_){
std::cerr << "[MorseSmaleComplex3D] Error: input offset field pointer is null." << std::endl;
return -1;
}
#endif
Timer t;
// nullptr_t is implicitly convertible and comparable to any pointer type
// or pointer-to-member type.
// These casts yield null pointers when the corresponding output was not set;
// each segmentation step below is guarded accordingly.
SimplexId* ascendingManifold=static_cast<SimplexId*>(outputAscendingManifold_);
SimplexId* descendingManifold=static_cast<SimplexId*>(outputDescendingManifold_);
SimplexId* morseSmaleManifold=static_cast<SimplexId*>(outputMorseSmaleManifold_);
discreteGradient_.setThreadNumber(threadNumber_);
discreteGradient_.setDebugLevel(debugLevel_);
{
Timer tmp;
// build the discrete gradient on the 0/1-, 1/2- and 2/3-dimensional cell pairs
discreteGradient_.buildGradient<dataType,idType>();
discreteGradient_.buildGradient2<dataType,idType>();
discreteGradient_.buildGradient3<dataType,idType>();
{
std::stringstream msg;
msg << "[MorseSmaleComplex3D] Discrete gradient overall computed in "
<< tmp.getElapsedTime() << " s."
<< std::endl;
dMsg(std::cout, msg.str(), timeMsg);
}
}
// simplify the gradient (must precede separatrix extraction)
discreteGradient_.reverseGradient<dataType,idType>();
std::vector<dcg::Cell> criticalPoints;
discreteGradient_.getCriticalPoints(criticalPoints);
// 1-separatrices
if(ComputeDescendingSeparatrices1){
Timer tmp;
std::vector<Separatrix> separatrices;
std::vector<std::vector<dcg::Cell>> separatricesGeometry;
getDescendingSeparatrices1(criticalPoints, separatrices, separatricesGeometry);
setSeparatrices1<dataType>(separatrices, separatricesGeometry);
{
std::stringstream msg;
msg << "[MorseSmaleComplex3D] Descending 1-separatrices computed in "
<< tmp.getElapsedTime() << " s."
<< std::endl;
dMsg(std::cout, msg.str(), timeMsg);
}
}
if(ComputeAscendingSeparatrices1){
Timer tmp;
std::vector<Separatrix> separatrices;
std::vector<std::vector<dcg::Cell>> separatricesGeometry;
getAscendingSeparatrices1(criticalPoints, separatrices, separatricesGeometry);
setSeparatrices1<dataType>(separatrices, separatricesGeometry);
{
std::stringstream msg;
msg << "[MorseSmaleComplex3D] Ascending 1-separatrices computed in "
<< tmp.getElapsedTime() << " s."
<< std::endl;
dMsg(std::cout, msg.str(), timeMsg);
}
}
// saddle-connectors
if(ComputeSaddleConnectors){
Timer tmp;
std::vector<Separatrix> separatrices;
std::vector<std::vector<dcg::Cell>> separatricesGeometry;
getSaddleConnectors(criticalPoints, separatrices, separatricesGeometry);
setSaddleConnectors<dataType>(separatrices, separatricesGeometry);
{
std::stringstream msg;
msg << "[MorseSmaleComplex3D] Saddle connectors computed in "
<< tmp.getElapsedTime() << " s."
<< std::endl;
dMsg(std::cout, msg.str(), timeMsg);
}
}
// 2-separatrices
if(ComputeDescendingSeparatrices2){
Timer tmp;
std::vector<Separatrix> separatrices;
std::vector<std::vector<dcg::Cell>> separatricesGeometry;
std::vector<std::set<SimplexId>> separatricesSaddles;
getDescendingSeparatrices2(criticalPoints, separatrices, separatricesGeometry, separatricesSaddles);
// the OpenMP variant trades memory for speed; same output either way
#ifdef TTK_ENABLE_OPENMP
if(PrioritizeSpeedOverMemory)
omp_setDescendingSeparatrices2<dataType>(separatrices, separatricesGeometry, separatricesSaddles);
else
#endif
setDescendingSeparatrices2<dataType>(separatrices, separatricesGeometry, separatricesSaddles);
{
std::stringstream msg;
msg << "[MorseSmaleComplex3D] Descending 2-separatrices computed in "
<< tmp.getElapsedTime() << " s."
<< std::endl;
dMsg(std::cout, msg.str(), timeMsg);
}
}
if(ComputeAscendingSeparatrices2){
Timer tmp;
std::vector<Separatrix> separatrices;
std::vector<std::vector<dcg::Cell>> separatricesGeometry;
std::vector<std::set<SimplexId>> separatricesSaddles;
getAscendingSeparatrices2(criticalPoints, separatrices, separatricesGeometry, separatricesSaddles);
#ifdef TTK_ENABLE_OPENMP
if(PrioritizeSpeedOverMemory)
omp_setAscendingSeparatrices2<dataType>(separatrices, separatricesGeometry, separatricesSaddles);
else
#endif
setAscendingSeparatrices2<dataType>(separatrices, separatricesGeometry, separatricesSaddles);
{
std::stringstream msg;
msg << "[MorseSmaleComplex3D] Ascending 2-separatrices computed in "
<< tmp.getElapsedTime() << " s."
<< std::endl;
dMsg(std::cout, msg.str(), timeMsg);
}
}
std::vector<SimplexId> maxSeeds;
{
Timer tmp;
SimplexId numberOfMaxima{};
SimplexId numberOfMinima{};
// each segmentation is optional; the combined (final) segmentation needs all three buffers
if(ascendingManifold)
setAscendingSegmentation(criticalPoints, maxSeeds, ascendingManifold, numberOfMaxima);
if(descendingManifold)
setDescendingSegmentation(criticalPoints, descendingManifold, numberOfMinima);
if(ascendingManifold and descendingManifold and morseSmaleManifold)
setFinalSegmentation(numberOfMaxima, numberOfMinima, ascendingManifold, descendingManifold, morseSmaleManifold);
if(ascendingManifold or descendingManifold){
std::stringstream msg;
msg << "[MorseSmaleComplex3D] Segmentation computed in "
<< tmp.getElapsedTime() << " s."
<< std::endl;
dMsg(std::cout, msg.str(), timeMsg);
}
}
if(outputCriticalPoints_numberOfPoints_ and outputSeparatrices1_points_){
// augmented critical points carry segmentation ids; plain ones do not
if(ascendingManifold and descendingManifold)
discreteGradient_.setAugmentedCriticalPoints<dataType,idType>(criticalPoints,
maxSeeds,
ascendingManifold,
descendingManifold);
else
discreteGradient_.setCriticalPoints<dataType,idType>(criticalPoints);
}
{
const SimplexId numberOfVertices=inputTriangulation_->getNumberOfVertices();
std::stringstream msg;
msg << "[MorseSmaleComplex3D] Data-set (" << numberOfVertices
<< " points) processed in "
<< t.getElapsedTime() << " s. (" << threadNumber_
<< " thread(s))."
<< std::endl;
dMsg(std::cout, msg.str(), timeMsg);
}
return 0;
}
// Computes the PL saddle-saddle persistence pairs: extracts the PL critical
// points, restricts them to the vertices appearing in the join/split-tree
// pairs (JTPairs/STPairs), runs the discrete gradient reversal to collect
// DMT saddle-saddle pairs, and translates those back to PL vertex pairs with
// their persistence. Results are appended to pl_saddleSaddlePairs.
template<typename dataType, typename idType>
int ttk::MorseSmaleComplex3D::computePersistencePairs(const std::vector<std::tuple<SimplexId, SimplexId, dataType>>& JTPairs,
const std::vector<std::tuple<SimplexId, SimplexId, dataType>>& STPairs,
std::vector<std::tuple<SimplexId,SimplexId,dataType>>& pl_saddleSaddlePairs){
dataType* scalars=static_cast<dataType*>(inputScalarField_);
const SimplexId numberOfVertices=inputTriangulation_->getNumberOfVertices();
// get original list of critical points
std::vector<std::pair<SimplexId,char>> pl_criticalPoints;
{
const SimplexId* const offsets=static_cast<SimplexId*>(inputOffsets_);
std::vector<SimplexId> sosOffsets(numberOfVertices);
for(SimplexId i=0; i<numberOfVertices; ++i)
sosOffsets[i]=offsets[i];
ScalarFieldCriticalPoints<dataType> scp;
scp.setDebugLevel(debugLevel_);
scp.setThreadNumber(threadNumber_);
scp.setDomainDimension(inputTriangulation_->getDimensionality());
scp.setScalarValues(inputScalarField_);
scp.setVertexNumber(numberOfVertices);
scp.setSosOffsets(&sosOffsets);
scp.setupTriangulation(inputTriangulation_);
scp.setOutput(&pl_criticalPoints);
scp.execute();
}
// build accepting list
// a vertex is accepted iff it appears in a join- or split-tree pair
std::vector<char> isAccepted(numberOfVertices, false);
for(const auto& i : JTPairs){
const SimplexId v0=std::get<0>(i);
const SimplexId v1=std::get<1>(i);
isAccepted[v0]=true;
isAccepted[v1]=true;
}
for(const auto& i : STPairs){
const SimplexId v0=std::get<0>(i);
const SimplexId v1=std::get<1>(i);
isAccepted[v0]=true;
isAccepted[v1]=true;
}
// filter the critical points according to the filtering list and boundary condition
std::vector<char> isSaddle1(numberOfVertices, false);
std::vector<char> isSaddle2(numberOfVertices, false);
std::vector<std::pair<SimplexId,char>> pl_filteredCriticalPoints;
for(const auto& i : pl_criticalPoints){
const SimplexId vertexId=i.first;
const char type=i.second;
if(isAccepted[vertexId]){
pl_filteredCriticalPoints.push_back(i);
switch(type){
case static_cast<char>(CriticalType::Saddle1):
isSaddle1[vertexId]=true;
break;
case static_cast<char>(CriticalType::Saddle2):
isSaddle2[vertexId]=true;
break;
}
}
}
std::vector<std::tuple<dcg::Cell,dcg::Cell>> dmt_pairs;
{
// simplify to be PL-conformant
discreteGradient_.setDebugLevel(debugLevel_);
discreteGradient_.setThreadNumber(threadNumber_);
discreteGradient_.setReverseSaddleMaximumConnection(true);
discreteGradient_.setReverseSaddleSaddleConnection(true);
discreteGradient_.setCollectPersistencePairs(false);
discreteGradient_.buildGradient<dataType,idType>();
discreteGradient_.buildGradient2<dataType,idType>();
discreteGradient_.buildGradient3<dataType,idType>();
discreteGradient_.reverseGradient<dataType,idType>(pl_criticalPoints);
// collect saddle-saddle connections
// second reversal pass, this time recording the cancelled pairs
discreteGradient_.setReverseSaddleMaximumConnection(true);
discreteGradient_.setCollectPersistencePairs(true);
discreteGradient_.setOutputPersistencePairs(&dmt_pairs);
discreteGradient_.reverseGradient<dataType,idType>(pl_filteredCriticalPoints);
}
// transform DMT pairs into PL pairs
for(const auto& i : dmt_pairs){
const dcg::Cell& saddle1=std::get<0>(i);
const dcg::Cell& saddle2=std::get<1>(i);
// v0: a 1-saddle vertex of the DMT edge, else its lowest vertex
SimplexId v0=-1;
for(SimplexId j=0; j<2; ++j){
SimplexId vertexId;
inputTriangulation_->getEdgeVertex(saddle1.id_, j, vertexId);
if(isSaddle1[vertexId]){
v0=vertexId;
break;
}
}
if(v0==-1){
dataType scalar{};
for(int j=0; j<2; ++j){
SimplexId vertexId;
inputTriangulation_->getEdgeVertex(saddle1.id_,j,vertexId);
const dataType vertexScalar=scalars[vertexId];
if(!j or scalar>vertexScalar){
v0=vertexId;
scalar=vertexScalar;
}
}
}
// v1: a 2-saddle vertex of the DMT triangle, else its highest vertex
SimplexId v1=-1;
for(int j=0; j<3; ++j){
SimplexId vertexId;
inputTriangulation_->getTriangleVertex(saddle2.id_, j, vertexId);
if(isSaddle2[vertexId]){
v1=vertexId;
break;
}
}
if(v1==-1){
dataType scalar{};
for(int j=0; j<3; ++j){
SimplexId vertexId;
inputTriangulation_->getTriangleVertex(saddle2.id_,j,vertexId);
const dataType vertexScalar=scalars[vertexId];
if(!j or scalar<vertexScalar){
v1=vertexId;
scalar=vertexScalar;
}
}
}
// both fallback loops always assign on j==0, so v0 and v1 cannot be -1 here
const dataType persistence=scalars[v1]-scalars[v0];
if(v0!=-1 and v1!=-1 and persistence>=0){
// discard pairs with both endpoints on the boundary
if(!inputTriangulation_->isVertexOnBoundary(v0) or !inputTriangulation_->isVertexOnBoundary(v1)){
pl_saddleSaddlePairs.push_back(std::make_tuple(v0,v1,persistence));
}
}
}
return 0;
}
#endif // MORSESMALECOMPLEX3D_H
|
metadirective.c | #define N 100
#include <stdio.h>
#ifndef N
#define N 100
#endif
/* OpenMP metadirective smoke test: whichever variant the metadirective
   selects (teams loop on amdgcn, parallel loop otherwise), v3 must come
   back from the target region holding the element-wise product v1*v2. */
int main()
{
  int v1[N], v2[N], v3[N];
  int errors = 0;

  for (int i = 0; i < N; i++) {
    v1[i] = i + 1;
    v2[i] = -(i + 1);
  }
#pragma omp target map(to:v1,v2) map(from:v3) device(0)
#pragma omp metadirective \
when( device={arch("amdgcn")}: teams loop) \
default( parallel loop)
for (int i= 0; i< N; i++) v3[i] = v1[i] * v2[i];
  printf(" %d %d\n",v3[0],v3[N-1]); //output: -1 -10000
  for (int i = 0; i < N; i++) {
    if (v3[i] != v1[i] * v2[i]) {
      printf("v3[%d]: %d Correct:%d\n", i, v3[i], v1[i] * v2[i]);
      errors += 1;
    }
  }
  if (errors) {
    printf("Fail!\n");
    return 1;
  }
  printf("Success\n");
  return 0;
}
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
#define BezierQuantum 200
#define PrimitiveExtentPad 2048
#define MaxBezierCoordinates 4194304
#define ThrowPointExpectedException(token,exception) \
{ \
(void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
"NonconformingDrawingPrimitiveDefinition","`%s'",token); \
status=MagickFalse; \
break; \
}
/*
Typedef declarations.
*/
/*
  One polygon edge in the sorted rendering form produced by
  ConvertPathToPolygon(): a run of points with cached bounds and
  scan-conversion state.
*/
typedef struct _EdgeInfo
{
SegmentInfo
bounds;  /* bounding box of the edge's points */
double
scanline;  /* NOTE(review): appears to be the last scanline processed -- confirm */
PointInfo
*points;  /* the edge's vertices */
size_t
number_points;  /* number of entries in points[] */
ssize_t
direction;  /* nonzero means "down" (see LogPolygonInfo) */
MagickBooleanType
ghostline;  /* transparent (non-rendered) edge when MagickTrue */
size_t
highwater;  /* NOTE(review): looks like a scan-resume index -- confirm */
} EdgeInfo;
/*
  An ellipse element: center, semi-axes and rotation angle.
*/
typedef struct _ElementInfo
{
double
cx,  /* center x */
cy,  /* center y */
major,  /* major semi-axis */
minor,  /* minor semi-axis */
angle;  /* rotation angle */
} ElementInfo;
/*
  Mutable state threaded through the MVG tracing routines: the growable
  primitive array, the write offset into it, the current point and the
  exception sink.
*/
typedef struct _MVGInfo
{
PrimitiveInfo
**primitive_info;  /* address of the growable primitive array */
size_t
*extent;  /* address of the array's allocated length */
ssize_t
offset;  /* current write position */
PointInfo
point;  /* current point */
ExceptionInfo
*exception;  /* where tracing errors are reported */
} MVGInfo;
/*
  A polygon as a set of sorted edges (see ConvertPathToPolygon).
*/
typedef struct _PolygonInfo
{
EdgeInfo
*edges;  /* sorted edge array */
size_t
number_edges;  /* number of entries in edges[] */
} PolygonInfo;
/*
  Path vertex classification used while converting a path to edges.
*/
typedef enum
{
MoveToCode,  /* start of a new subpath */
OpenCode,  /* open (unclosed) subpath marker */
GhostlineCode,  /* start of a non-rendered subpath */
LineToCode,  /* continuation of the current subpath */
EndCode  /* end-of-path sentinel */
} PathInfoCode;
/*
  One vertex of a path together with its classification code.
*/
typedef struct _PathInfo
{
PointInfo
point;  /* vertex coordinates */
PathInfoCode
code;  /* vertex classification */
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
ExceptionInfo *),
RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
static PrimitiveInfo
*TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);
static size_t
TracePath(MVGInfo *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  /*
    Allocate a DrawInfo structure and fill it with default values.
  */
  DrawInfo *draw_info = (DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));

  GetDrawInfo((ImageInfo *) NULL,draw_info);
  return draw_info;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  ExceptionInfo
    *exception;

  /*
    Allocate and default-initialize the clone; a NULL draw_info simply
    yields a fresh default DrawInfo.
  */
  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);
  exception=AcquireExceptionInfo();
  /*
    Deep-copy the string members; scalar members are copied by value.
  */
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      exception);
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      register ssize_t
        x;

      /*
        The dash pattern is a MagickEpsilon-terminated array; copy the x+1
        meaningful entries into a zeroed buffer of 2*x+2 doubles.
      */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
        sizeof(*clone_info->dash_pattern));
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateGradientStops");  /* was the copy-pasted dash-pattern tag */
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_alpha=draw_info->fill_alpha;
  clone_info->stroke_alpha=draw_info->stroke_alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,exception);
  clone_info->render=draw_info->render;
  clone_info->debug=IsEventLogging();
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
%
% A description of each parameter follows:
%
% o Method ConvertPathToPolygon returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o path_info: Specifies a pointer to a PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
  const PointInfo
    *p,
    *q;

  double
    cross;

  /*
    Edge sorting for right-handed coordinate system: compare starting y,
    then starting x, then slope (via a cross product of the two direction
    vectors), then ending y and ending x.
  */
  p=((const EdgeInfo *) p_edge)->points;
  q=((const EdgeInfo *) q_edge)->points;
  if ((p[0].y-q[0].y) < 0.0)
    return(-1);
  if ((p[0].y-q[0].y) > 0.0)
    return(1);
  if ((p[0].x-q[0].x) < 0.0)
    return(-1);
  if ((p[0].x-q[0].x) > 0.0)
    return(1);
  cross=(p[1].x-p[0].x)*(q[1].y-q[0].y)-(p[1].y-p[0].y)*(q[1].x-q[0].x);
  if (cross < 0.0)
    return(-1);
  if (cross > 0.0)
    return(1);
  if ((p[1].y-q[1].y) < 0.0)
    return(-1);
  if ((p[1].y-q[1].y) > 0.0)
    return(1);
  if ((p[1].x-q[1].x) < 0.0)
    return(-1);
  if ((p[1].x-q[1].x) > 0.0)
    return(1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
register EdgeInfo
*p;
register ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  ssize_t
    head,
    tail;

  /*
    Reverse the point list in place by swapping entries from both ends
    toward the middle.
  */
  head=0;
  tail=(ssize_t) number_points-1;
  while (head < tail)
  {
    PointInfo
      swap;

    swap=points[head];
    points[head]=points[tail];
    points[tail]=swap;
    head++;
    tail--;
  }
}
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
long
direction,
next_direction;
PointInfo
point,
*points;
PolygonInfo
*polygon_info;
SegmentInfo
bounds;
register ssize_t
i,
n;
MagickBooleanType
ghostline;
size_t
edge,
number_edges,
number_points;
/*
Convert a path to the more efficient sorted rendering form.
*/
polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
if (polygon_info == (PolygonInfo *) NULL)
return((PolygonInfo *) NULL);
number_edges=16;
polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
(void) memset(polygon_info->edges,0,number_edges*
sizeof(*polygon_info->edges));
direction=0;
edge=0;
ghostline=MagickFalse;
n=0;
number_points=0;
points=(PointInfo *) NULL;
(void) memset(&point,0,sizeof(point));
(void) memset(&bounds,0,sizeof(bounds));
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=0.0;
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) direction;
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->number_edges=0;
for (i=0; path_info[i].code != EndCode; i++)
{
if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
(path_info[i].code == GhostlineCode))
{
/*
Move to.
*/
if ((points != (PointInfo *) NULL) && (n >= 2))
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
points=(PointInfo *) NULL;
ghostline=MagickFalse;
edge++;
}
if (points == (PointInfo *) NULL)
{
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
point=path_info[i].point;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
direction=0;
n=1;
continue;
}
/*
Line to.
*/
next_direction=((path_info[i].point.y > point.y) ||
((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
(path_info[i].point.x > point.x))) ? 1 : -1;
if ((points != (PointInfo *) NULL) && (direction != 0) &&
(direction != next_direction))
{
/*
New edge.
*/
point=points[n-1];
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
n=1;
ghostline=MagickFalse;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
edge++;
}
direction=next_direction;
if (points == (PointInfo *) NULL)
continue;
if (n == (ssize_t) number_points)
{
number_points<<=1;
points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
point=path_info[i].point;
points[n]=point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.x > bounds.x2)
bounds.x2=point.x;
n++;
}
if (points != (PointInfo *) NULL)
{
if (n < 2)
points=(PointInfo *) RelinquishMagickMemory(points);
else
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
ghostline=MagickFalse;
edge++;
}
}
polygon_info->number_edges=edge;
qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
sizeof(*polygon_info->edges),DrawCompareEdges);
if (IsEventLogging() != MagickFalse)
LogPolygonInfo(polygon_info);
return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o Method ConvertPrimitiveToPath returns a vector path structure of type
% PathInfo.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
static void LogPathInfo(const PathInfo *path_info)
{
  const char
    *description;

  register const PathInfo
    *p;

  /*
    Write each component of the vector path to the drawing event log.
  */
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
  {
    switch (p->code)
    {
      case GhostlineCode:
        description="moveto ghostline";
        break;
      case OpenCode:
        description="moveto open";
        break;
      case MoveToCode:
        description="moveto";
        break;
      case LineToCode:
        description="lineto";
        break;
      default:
        description="?";
        break;
    }
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %g,%g %s",p->point.x,p->point.y,description);
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,  /* first point of the current subpath */
    q;  /* most recently emitted point (used to drop duplicates) */

  register ssize_t
    i,
    n;

  ssize_t
    coordinates,  /* points remaining in the current subpath */
    start;        /* path_info index of the current subpath's first entry */

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
    Primitives that have no path form (alpha, color, image, point, text)
    yield NULL.  Subpaths that are not closed are terminated with a
    ghostline segment back to their first point.
  */
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  /*
    Count the coordinates; allocate for the worst case of three path entries
    per coordinate plus the terminating EndCode entry (3*i+1).
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    return((PathInfo *) NULL);
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed, and emit a
      ghostline segment back to the subpath start.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  /*
    Trim the allocation to the n+1 entries actually written; on resize
    failure, NULL is returned to the caller.
  */
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with a DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  /*
    Release every heap-allocated member of the DrawInfo, invalidate its
    signature, then free the structure itself.  Always returns NULL, which
    supports the `info=DestroyDrawInfo(info);` idiom.
  */
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info->signature == MagickCoreSignature);
  /* owned strings */
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  /* owned pattern images */
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  /* invalidate the signature to catch use-after-free of this structure */
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
% size_t DestroyEdge(PolygonInfo *polygon_info,const size_t edge)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
  const size_t edge)
{
  size_t
    remaining;

  /*
    Release the point list of the given edge, then close the gap in the
    edge array by shifting the remaining edges down one slot.  Returns the
    new edge count.
  */
  assert(edge < polygon_info->number_edges);
  polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
    polygon_info->edges[edge].points);
  remaining=--polygon_info->number_edges;
  if (edge < remaining)
    (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
      (remaining-edge)*sizeof(*polygon_info->edges));
  return(remaining);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  register ssize_t
    i;

  /*
    Release each edge's point list, then the edge array, then the polygon
    structure itself.  Returns NULL.
  */
  for (i=(ssize_t) polygon_info->number_edges-1; i >= 0; i--)
    polygon_info->edges[i].points=(PointInfo *)
      RelinquishMagickMemory(polygon_info->edges[i].points);
  polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges);
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
% o exception: return any errors or warnings in this structure.
%
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  register double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Clip the scanline span [edge->x1,edge->x2] at destination row y so only
    pixels whose preimage under 'affine' falls inside the source image
    remain.  The caller treats inverse_edge.x2 < inverse_edge.x1 as an
    empty span.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  /*
    Determine left and right edges: z is the source column coordinate at
    x == 0 on this row; solve for the x where the column crosses 0 and
    image->columns.
  */
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        /* negative sx: the two intercepts swap roles */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      /* |sx| ~ 0: the column coordinate is constant along this row */
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges: same procedure against the source row
    coordinate and image->rows.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    scale;

  /*
    Invert the 2x3 affine transform.  The linear 2x2 part is the adjugate
    scaled by the reciprocal of the determinant; the translation is the
    negated original translation mapped through the inverse linear part.
    PerceptibleReciprocal() guards against a singular (near-zero
    determinant) matrix.
  */
  scale=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*affine->ry);
  inverse_affine.sx=scale*affine->sy;
  inverse_affine.rx=scale*(-affine->rx);
  inverse_affine.ry=scale*(-affine->ry);
  inverse_affine.sy=scale*affine->sx;
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}
MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    extent[4],
    min,
    max;

  register ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Composite the source image over the destination as dictated by the
    affine transform: compute the destination-space bounding box of the
    transformed source, then map each destination pixel in that box back
    through the inverse affine and composite the interpolated source pixel
    over it.  (The unused 'x_offset' counter of the previous revision has
    been removed.)
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  /*
    Transform the four source corners into destination space.
  */
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    PointInfo
      point;

    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=(ssize_t) ceil(edge.y1-0.5);
  stop=(ssize_t) floor(edge.y2+0.5);
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;

    PointInfo
      point;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    /*
      Clip this scanline to the pixels whose preimage lies inside the
      source; an empty span is skipped.
    */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
      1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5);
         x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
    {
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% MagickBooleanType DrawBoundingRectangles(Image *image,
% const DrawInfo *draw_info,PolygonInfo *polygon_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double SaneStrokeWidth(const Image *image,
  const DrawInfo *draw_info)
{
  double
    maximum_width;

  /*
    Clamp the stroke width to a sane maximum proportional to the larger
    image dimension.
  */
  maximum_width=(2.0*sqrt(2.0)+MagickEpsilon)*MagickMax(image->columns,
    image->rows);
  return(MagickMin((double) draw_info->stroke_width,maximum_width));
}
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info,
  ExceptionInfo *exception)
{
  double
    mid;  /* half the effective stroke width, used to pad rectangles */

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  /*
    Debug aid: stroke a rectangle around each polygon edge (red for "down"
    edges, green for "up" edges) and a blue rectangle around the overall
    polygon bounds.
  */
  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  /*
    Resolve the rendering resolution (default 96 DPI) from the density
    string, if one was supplied.
  */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    SaneStrokeWidth(image,clone_info)/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /*
        Union of all edge bounds, padded by mid and clamped to the image.
      */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      /*
        One stroked rectangle per edge, colored by edge direction.
      */
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke,
            exception);
        else
          status=QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke,
            exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info,exception);
        if (status == MagickFalse)
          break;
      }
      if (i < (ssize_t) polygon_info->number_edges)
        {
          /* loop exited early on failure */
          clone_info=DestroyDrawInfo(clone_info);
          return(status == 0 ? MagickFalse : MagickTrue);
        }
    }
  /*
    Overall bounding rectangle in blue.
  */
  status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
  Image
    *clipping_mask;

  MagickBooleanType
    status;

  const char
    *clip_path;

  /*
    Look up the clip path registered under the given id, render it into a
    mask image, and install that mask as the image's write mask.
  */
  status=MagickFalse;
  clip_path=GetImageArtifact(image,id);
  if (clip_path == (const char *) NULL)
    return(status);
  clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,
    clip_path,exception);
  if (clipping_mask == (Image *) NULL)
    return(status);
  status=SetImageMask(image,WritePixelMask,clipping_mask,exception);
  clipping_mask=DestroyImage(clipping_mask);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  Image
    *clip_mask,
    *separate_mask;

  MagickStatusType
    status;

  /*
    Draw a clip path: render the MVG clip_path in white on a transparent
    canvas the size of the image, then separate and negate the alpha
    channel so it can serve as a write mask.  Returns the mask image, or
    NULL on failure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
  /* fully transparent background */
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  clip_mask->background_color.alpha_trait=BlendPixelTrait;
  status=SetImageBackgroundColor(clip_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  /* render the clip path with white fill and no stroke */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /* convert the rendered alpha into a grayscale mask */
  separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      clip_mask=DestroyImage(clip_mask);
      clip_mask=separate_mask;
      status=NegateImage(clip_mask,MagickFalse,exception);
      if (status == MagickFalse)
        clip_mask=DestroyImage(clip_mask);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask,
    *separate_mask;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  /*
    Draw a mask path: render the MVG mask_path in white on a transparent
    canvas the size of the image, then separate and negate the alpha
    channel so it can serve as a composite mask.  Mirrors
    DrawClippingMask() but targets the composite pixel mask.  Returns the
    mask image, or NULL on failure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));
  status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
    exception);
  /* fully transparent background */
  status=QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  composite_mask->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(composite_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  /* render the mask path with white fill and no stroke */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  status=RenderMVGContent(composite_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /* convert the rendered alpha into a grayscale mask */
  separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      composite_mask=DestroyImage(composite_mask);
      composite_mask=separate_mask;
      status=NegateImage(composite_mask,MagickFalse,exception);
      if (status == MagickFalse)
        composite_mask=DestroyImage(composite_mask);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  /*
    Stroke the path in primitive_info as a series of dashes, honoring
    draw_info->dash_pattern (alternating on/off lengths, scaled by the
    current affine) and draw_info->dash_offset.  Each completed "on"
    segment is rendered with DrawStrokePolygon() from a scratch polygon
    buffer (dash_polygon).  Returns MagickFalse on allocation failure or
    when any stroke operation fails.
  */
  double
    length,
    maximum_length,
    offset,
    scale,
    total_length;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;

  register double
    dx,
    dy;

  register ssize_t
    i;

  size_t
    number_vertices;

  ssize_t
    j,
    n;

  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  /*
    Count path vertices; the scratch buffer is sized for up to two points
    per vertex plus slack for dash terminators.
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;  /* dash segments are short; no miter joins */
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  /*
    Consume the dash offset: advance through the pattern until the offset
    is spent, leaving n at the active pattern entry and length at the
    unconsumed remainder of that entry.  NOTE(review): j is reset to 0
    once this loop iterates, so a non-zero offset restarts the pending
    dash polygon -- confirm intended.
  */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    /* Half-pixel bias: shrink the first entry, widen the rest. */
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /*
    Walk each path edge, emitting dashes along it.
  */
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);  /* length of this edge */
    if (maximum_length > MaxBezierCoordinates)
      break;
    if (fabs(length) < MagickEpsilon)
      {
        /*
          Current pattern entry exhausted: advance, wrapping back to entry
          0 when the terminating zero entry is reached.
        */
        if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
          n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    /*
      Emit whole pattern entries that fit on this edge.  Odd entries are
      gaps: finishing one marks the start of the next dash.  Even entries
      are dashes: finishing one appends the end point and strokes the
      accumulated dash polygon.
    */
    for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /* End of a gap: begin a new dash at the current position. */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          if ((j+1) > (ssize_t) number_vertices)
            break;  /* guard against overrunning the scratch buffer */
          /* End of a dash: close it and stroke the accumulated polygon. */
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
          if (status == MagickFalse)
            break;
        }
      if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
        n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /* Carry the unconsumed pattern length onto the next edge. */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;  /* inside a gap: nothing to record for this vertex */
    /* Inside a dash: record this vertex in the pending dash polygon. */
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  /*
    Flush any dash still pending at the end of the path.
  */
  if ((status != MagickFalse) && (total_length < maximum_length) &&
      ((n & 0x01) == 0) && (j > 1))
    {
      dash_polygon[j]=primitive_info[i-1];
      /* Nudge the end point so the closing segment has non-zero length. */
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  /*
    Map pixel (x,y) to its position along the gradient.  For linear (and
    undefined) gradients this is the scalar projection of the pixel onto
    the gradient vector; for radial gradients it is the (possibly
    ellipse-normalized) distance from the gradient center.  Unknown
    gradient types yield 0.0.
  */
  if ((gradient->type == UndefinedGradient) ||
      (gradient->type == LinearGradient))
    {
      const SegmentInfo
        *vector;

      double
        dot,
        reciprocal,
        span;

      PointInfo
        delta,
        direction;

      vector=(&gradient->gradient_vector);
      direction.x=vector->x2-vector->x1;
      direction.y=vector->y2-vector->y1;
      delta.x=(double) x-vector->x1;
      delta.y=(double) y-vector->y1;
      span=sqrt(delta.x*delta.x+delta.y*delta.y);
      reciprocal=PerceptibleReciprocal(sqrt(direction.x*direction.x+
        direction.y*direction.y)*span);
      dot=direction.x*delta.x+direction.y*delta.y;
      return(reciprocal*dot*span);
    }
  if (gradient->type == RadialGradient)
    {
      double
        cosine,
        sine;

      PointInfo
        v;

      if (gradient->spread == RepeatSpread)
        {
          /* Repeat spread: plain Euclidean distance from the center. */
          v.x=(double) x-gradient->center.x;
          v.y=(double) y-gradient->center.y;
          return(sqrt(v.x*v.x+v.y*v.y));
        }
      /* Rotate into the gradient frame and normalize by the radii. */
      cosine=cos(DegreesToRadians(gradient->angle));
      sine=sin(DegreesToRadians(gradient->angle));
      v.x=(double) (((x-gradient->center.x)*cosine)+
        ((y-gradient->center.y)*sine))*PerceptibleReciprocal(
        gradient->radii.x);
      v.y=(double) (((x-gradient->center.x)*sine)-
        ((y-gradient->center.y)*cosine))*PerceptibleReciprocal(
        gradient->radii.y);
      return(sqrt(v.x*v.x+v.y*v.y));
    }
  return(0.0);
}
static int StopInfoCompare(const void *x,const void *y)
{
  /*
    qsort() comparator ordering gradient stops by ascending offset.
    Offsets within MagickEpsilon of each other compare equal.  The
    equality test is performed FIRST: in the original ordering, two
    offsets within epsilon but with stop_1 > stop_2 returned 1 while the
    swapped call returned 0, violating the consistent-ordering contract
    qsort() requires (undefined behavior per the C standard).
  */
  const StopInfo
    *stop_1,
    *stop_2;

  stop_1=(const StopInfo *) x;
  stop_2=(const StopInfo *) y;
  if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon)
    return(0);
  if (stop_1->offset > stop_2->offset)
    return(1);
  return(-1);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  /*
    Render draw_info's linear or radial gradient onto the image region
    given by the gradient's bounding box, compositing each interpolated
    stop color over the existing pixels.  Rows are processed in parallel
    when OpenMP support is compiled in.
  */
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  /*
    Order the color stops by ascending offset so the per-pixel lookup can
    use a simple linear scan.  NOTE(review): qsort writes through a
    pointer derived from the const draw_info; assumes the caller's stops
    array is writable -- confirm.
  */
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);  /* gradient vector length */
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  /*
    NOTE(review): bounding_box.height and .width are used below as
    absolute end coordinates (y < height), not as extents relative to
    x/y -- confirm upstream fills the bounding box that way.
  */
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    PixelInfo
      composite,
      pixel;

    double
      alpha,
      offset;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i,
      x;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    /* Seed the offset for column 0 of this row. */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);  /* normalize by vector length */
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /*
            Pad spread: offsets outside [0,1] clamp to the first or last
            stop color.
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          /* Locate the first stop beyond this offset. */
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /* Interpolate between bracketing stops i-1 and i. */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /*
            Reflect spread: mirror the gradient on every other repetition
            so adjacent tiles meet smoothly.
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          MagickBooleanType
            antialias;

          double
            repeat;

          /*
            Repeat spread: tile the gradient; at each tile seam blend
            between the last and first stop (antialias) to soften the
            discontinuity.
          */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /* Seam pixel: blend last stop into first stop. */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      /* Composite the gradient color over the existing pixel. */
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const size_t pad)
{
  /*
    Ensure the primitive-info buffer referenced by mvg_info can hold at
    least mvg_info->offset+pad+PrimitiveExtentPad entries, growing it with
    ResizeQuantumMemory() when necessary.  On failure (overflow, resource
    limit, or allocation error) the old buffer is released, a minimal
    buffer is installed so callers can unwind safely, an exception is
    recorded, and MagickFalse is returned.
  */
  double
    extent;

  size_t
    quantum;

  /*
    Check if there is enough storage for drawing primitives.
  */
  extent=(double) mvg_info->offset+pad+PrimitiveExtentPad;
  quantum=sizeof(**mvg_info->primitive_info);
  /* Compare in double to detect overflow before the size_t cast below. */
  if (((extent*quantum) < (double) SSIZE_MAX) &&
      ((extent*quantum) < (double) GetMaxMemoryRequest()))
    {
      if (extent <= (double) *mvg_info->extent)
        return(MagickTrue);  /* current buffer is already large enough */
      *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
        *mvg_info->primitive_info,(size_t) extent,quantum);
      if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
        {
          register ssize_t
            i;

          *mvg_info->extent=(size_t) extent;
          /* Mark the newly added tail entries as unused. */
          for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
            (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
          return(MagickTrue);
        }
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
  */
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
      *mvg_info->primitive_info);
  *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
    PrimitiveExtentPad*quantum);
  (void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
  *mvg_info->extent=1;
  return(MagickFalse);
}
MagickExport int MVGMacroCompare(const void *target,const void *source)
{
  /*
    Splay-tree key comparator: compare two macro names as C strings,
    returning strcmp() semantics (<0, 0, >0).
  */
  return(strcmp((const char *) target,(const char *) source));
}
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  /*
    Pre-scan an MVG stream and build a splay tree mapping each named
    macro -- introduced by e.g. 'push graphic-context "wheel"' -- to the
    MVG text between that push and its matching pop.  Returns NULL when
    primitive is NULL; otherwise the caller owns the returned tree.
  */
  char
    *macro,
    *token;

  const char
    *q;

  size_t
    extent;

  SplayTreeInfo
    *macros;

  /*
    Scan graphic primitives for definitions and classes.
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
    RelinquishMagickMemory);
  /* Scratch buffers sized to the whole stream so any span fits. */
  macro=AcquireString(primitive);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    if (GetNextToken(q,&q,extent,token) < 1)
      break;
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        register const char
          *end,
          *start;

        (void) GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];

            const char
              *p;

            ssize_t
              n;

            /*
              Named macro (e.g. push graphic-context "wheel").
            */
            (void) GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            /* Track push/pop nesting to find the matching pop. */
            n=1;
            for (p=q; *p != '\0'; )
            {
              if (GetNextToken(p,&p,extent,token) < 1)
                break;
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  /* Candidate macro end: just before this "pop" token. */
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  /*
                    Extract macro.
                  */
                  (void) GetNextToken(p,&p,extent,token);
                  /* NOTE(review): CopyMagickString copies at most n-1
                     bytes, so a span length of (end-start) drops the
                     final character before "pop" -- confirm that
                     trimming is intended. */
                  (void) CopyMagickString(macro,start,(size_t) (end-start));
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  macro=DestroyString(macro);
  return(macros);
}
static inline MagickBooleanType IsPoint(const char *point)
{
  /*
    Return MagickTrue if the string parses as a numeric coordinate.  A
    token is rejected only when the conversion consumed no characters AND
    the resulting value is (near) zero.
  */
  char
    *end;

  double
    value;

  value=StringToDouble(point,&end);
  if ((end == point) && (fabs(value) < MagickEpsilon))
    return(MagickFalse);
  return(MagickTrue);
}
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  /*
    Record a single-coordinate (point) primitive: one point, an open
    subpath.  Always succeeds.
  */
  primitive_info->point=point;
  primitive_info->closed_subpath=MagickFalse;
  primitive_info->coordinates=1;
  return(MagickTrue);
}
static MagickBooleanType RenderMVGContent(Image *image,
const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
keyword[MagickPathExtent],
geometry[MagickPathExtent],
*next_token,
pattern[MagickPathExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
*clone_info,
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points,
number_stops;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
StopInfo
*stops;
TypeMetric
metrics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (depth > MagickMaxRecursionDepth)
ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply",
image->filename);
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
if (status == MagickFalse)
return(MagickFalse);
}
if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-') && (depth == 0))
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
else
primitive=AcquireString(draw_info->primitive);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"mvg:vector-graphics",primitive);
n=0;
number_stops=0;
stops=(StopInfo *) NULL;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=PrimitiveExtentPad;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
(void) memset(&mvg_info,0,sizeof(mvg_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.exception=exception;
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
defsDepth=0;
symbolDepth=0;
cursor=0.0;
macros=GetMVGMacros(primitive);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1)
break;
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
*token='\0';
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.rx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ry=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("alpha",keyword) == 0)
{
primitive_type=AlphaPrimitive;
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if (mvg_class != (const char *) NULL)
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
{
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,
graphic_context[n]->clip_mask,clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
(void) GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
GetAffineMatrix(¤t);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
(void) GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
(void) GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->fill_alpha*=opacity;
if (graphic_context[n]->fill.alpha != TransparentAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
else
graphic_context[n]->fill.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
(void) GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
(void) GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
(void) GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
(void) GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
(void) GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("letter-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
clone_info->text=AcquireString(" ");
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
graphic_context[n]->kerning=metrics.width*
StringToDouble(token,&next_token);
clone_info=DestroyDrawInfo(clone_info);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
status=SetImageMask(image,CompositePixelMask,
graphic_context[n]->composite_mask,exception);
}
break;
}
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->fill_alpha*=opacity;
graphic_context[n]->stroke_alpha*=opacity;
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(graphic_context[n]->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
status=SetImageMask(image,WritePixelMask,(Image *) NULL,
exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
{
/*
Class context.
*/
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent],
type[MagickPathExtent];
SegmentInfo
segment;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
segment.x1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.x2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (LocaleCompare(type,"radial") == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("mask",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent];
RectangleInfo
bounds;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
bounds.x=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.y=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.width=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.height=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
number_stops++;
if (number_stops == 1)
stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops));
else
if (number_stops > 2)
stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops,
sizeof(*stops));
if (stops == (StopInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
stops[number_stops-1].color=stop_color;
(void) GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
stops[number_stops-1].offset=factor*StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=
graphic_context[n]->stroke_alpha;
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*r;
r=q;
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2*x+2)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
(void) GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
(void) GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->stroke_alpha*=opacity;
if (graphic_context[n]->stroke.alpha != TransparentAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
else
graphic_context[n]->stroke.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
cursor=0.0;
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Get a macro from the MVG document, and "use" it here.
*/
(void) GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=RenderMVGContent(image,clone_info,depth+1,exception);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'w':
case 'W':
{
if (LocaleCompare("word-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (*q == '\0')
{
if (number_stops > 1)
{
GradientType
type;
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,stops,number_stops,
exception);
}
if (number_stops > 0)
stops=(StopInfo *) RelinquishMagickMemory(stops);
}
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
(void) GetNextToken(q,&q,extent,token);
point.x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
point.y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
if (status == MagickFalse)
break;
if ((primitive_info[j].primitive == TextPrimitive) ||
(primitive_info[j].primitive == ImagePrimitive))
if (primitive_info[j].text != (char *) NULL)
primitive_info[j].text=DestroyString(primitive_info[j].text);
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(double) (BezierQuantum*primitive_info[j].coordinates);
if (primitive_info[j].coordinates > (107*BezierQuantum))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
case PathPrimitive:
{
char
*s,
*t;
(void) GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=StringToDouble(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
case CirclePrimitive:
case ArcPrimitive:
case EllipsePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0;
break;
}
default:
break;
}
if (coordinates > MaxBezierCoordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
status&=TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
primitive_type=UndefinedPrimitive;
break;
}
status&=TraceArc(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
status&=TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
status&=TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(&mvg_info,token,exception);
if (coordinates == 0.0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case AlphaPrimitive:
case ColorPrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) &&
(fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon))
{
mvg_info.point=primitive_info->point;
primitive_info->point.x+=cursor;
}
else
{
mvg_info.point=primitive_info->point;
cursor=0.0;
}
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
if (graphic_context[n]->compliance != SVGCompliance)
cursor=0.0;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
if (status == MagickFalse)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,graphic_context[n]->clip_mask,
clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
if (stops != (StopInfo *) NULL)
stops=(StopInfo *) RelinquishMagickMemory(stops);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
  ExceptionInfo *exception)
{
  /*
    Render the MVG drawing primitives in draw_info onto image.  Thin public
    wrapper around RenderMVGContent() with an initial recursion depth of 0.
  */
  MagickBooleanType
    status;

  status=RenderMVGContent(image,draw_info,0,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() draws a pattern.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
% o pattern: on return, the rendered pattern image; any previous image is
% destroyed and replaced.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern,
  ExceptionInfo *exception)
{
  char
    property[MagickPathExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *clone_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  /*
    Render the MVG path registered under the image artifact "<name>" into a
    freshly allocated pattern image sized by the "<name>-geometry" artifact.
    Any previous *pattern image is destroyed; the caller owns the new image.
    Returns MagickFalse when the artifacts are absent or allocation fails.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  (void) FormatLocaleString(property,MagickPathExtent,"%s",name);
  path=GetImageArtifact(image,property);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,property);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  if (*pattern == (Image *) NULL)
    return(MagickFalse);  /* allocation failed: avoid NULL dereference below */
  /* fully transparent background so only the drawn path is visible */
  (void) QueryColorCompliance("#000000ff",AllCompliance,
    &(*pattern)->background_color,exception);
  (void) SetImageBackgroundColor(*pattern,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  /* drop inherited patterns so the path renders with plain fill/stroke */
  clone_info->fill_pattern=NewImageList();
  clone_info->stroke_pattern=NewImageList();
  (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
  type=GetImageArtifact(image,property);
  if (type != (const char *) NULL)
    clone_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&clone_info->primitive,path);
  status=RenderMVGContent(*pattern,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  register ssize_t
    i;

  size_t
    number_threads;

  /*
    Release the per-thread polygon rasterization state allocated by
    AcquirePolygonThreadSet() and return NULL.
  */
  assert(polygon_info != (PolygonInfo **) NULL);
  /* hoist the loop-invariant resource query out of the loop condition */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < (ssize_t) number_threads; i++)
    if (polygon_info[i] != (PolygonInfo *) NULL)
      polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
  polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
  return(polygon_info);
}
static PolygonInfo **AcquirePolygonThreadSet(
const PrimitiveInfo *primitive_info)
{
PathInfo
*magick_restrict path_info;
PolygonInfo
**polygon_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
sizeof(*polygon_info));
if (polygon_info == (PolygonInfo **) NULL)
return((PolygonInfo **) NULL);
(void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(primitive_info);
if (path_info == (PathInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
polygon_info[i]=ConvertPathToPolygon(path_info);
if (polygon_info[i] == (PolygonInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
}
path_info=(PathInfo *) RelinquishMagickMemory(path_info);
return(polygon_info);
}
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_alpha)
{
  double
    alpha,
    beta,
    distance,
    subpath_alpha;

  PointInfo
    delta;

  register const PointInfo
    *q;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  ssize_t
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.  mid is half the
    scaled stroke width; *stroke_alpha receives the stroke coverage and the
    return value is the fill coverage (both in [0,1]).
  */
  *stroke_alpha=0.0;
  subpath_alpha=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /* edges are sorted by y; once the scanline is above this edge, stop */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        /* the scanline has passed this edge entirely; retire it */
        (void) DestroyEdge(polygon_info,(size_t) j);
        continue;
      }
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          /* remember progress on this scanline so later pixels skip ahead */
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge.
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta <= 0.0)
        {
          /* nearest feature is the segment's first endpoint */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta >= alpha)
            {
              /* nearest feature is the segment's second endpoint */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /* perpendicular (squared) distance to the segment interior */
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x);
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          alpha=mid+0.5;
          if ((*stroke_alpha < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_alpha=1.0;
              else
                {
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_alpha=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_alpha=1.0;
          continue;
        }
      /*
        0 < distance <= 1 here (the distance > 1.0 case already continued
        above; a redundant re-check was removed), so antialias the edge.
      */
      if (fabs(beta) < MagickEpsilon)
        {
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      alpha=beta-1.0;
      if (subpath_alpha < (alpha*alpha))
        subpath_alpha=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_alpha >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* cross product decides which side of the edge the point lies on */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /* even-odd fill rule */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_alpha);
}
static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    fill,
    status;

  double
    mid;

  PolygonInfo
    **magick_restrict polygon_info;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    start_y,
    stop_y,
    y;

  /*
    Scanline-rasterize a polygon (or line) primitive onto image, compositing
    fill and stroke coverage per pixel via GetFillAlpha().  One PolygonInfo
    per OpenMP thread is acquired so edge state can be mutated without locks.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  if (primitive_info->coordinates <= 1)
    return(MagickTrue);
  /*
    Compute bounding box.
  */
  polygon_info=AcquirePolygonThreadSet(primitive_info);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  /* debug-only scaffold, compiled out via if (0) */
  DisableMSCWarning(4127)
  if (0)
    {
      status=DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
      if (status == MagickFalse)
        {
          polygon_info=DestroyPolygonThreadSet(polygon_info);
          return(status);
        }
    }
  RestoreMSCWarning
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
  /* mid = half the stroke width after affine scaling */
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  /* union of all edge bounding boxes, padded by the stroke half-width */
  bounds=polygon_info[0]->edges[0].bounds;
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  bounds.x1-=(mid+1.0);
  bounds.y1-=(mid+1.0);
  bounds.x2+=(mid+1.0);
  bounds.y2+=(mid+1.0);
  if ((bounds.x1 >= (double) image->columns) ||
      (bounds.y1 >= (double) image->rows) ||
      (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
    {
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      return(MagickTrue);  /* virtual polygon */
    }
  /* clamp bounds into the image frame */
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x1;
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y1;
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x2;
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y2;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  /* NOTE(review): coordinates == 1 is unreachable here (early return above);
     only the number_edges == 0 half of this test can fire */
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point.
      */
      start_y=(ssize_t) ceil(bounds.y1-0.5);
      stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        ssize_t
          start_x,
          stop_x;

        if (status == MagickFalse)
          continue;
        start_x=(ssize_t) ceil(bounds.x1-0.5);
        stop_x=(ssize_t) floor(bounds.x2+0.5);
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        GetPixelInfo(image,&pixel);
        for ( ; x <= stop_x; x++)
        {
          /* set only the single pixel nearest the primitive's point */
          if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
              (y == (ssize_t) ceil(primitive_info->point.y-0.5)))
            {
              GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
            }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  start_y=(ssize_t) ceil(bounds.y1-0.5);
  stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      start_x,
      stop_x;

    if (status == MagickFalse)
      continue;
    start_x=(ssize_t) ceil(bounds.x1-0.5);
    stop_x=(ssize_t) floor(bounds.x2+0.5);
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
      1),1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      double
        fill_alpha,
        stroke_alpha;

      PixelInfo
        fill_color,
        stroke_color;

      /*
        Fill and/or stroke.
      */
      /* each thread rasterizes with its own polygon_info[id] copy */
      fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
        x,y,&stroke_alpha);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* no antialias: threshold coverage to hard on/off */
          fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0;
          stroke_alpha=stroke_alpha > 0.25 ? 1.0 : 0.0;
        }
      GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
      CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double ConstrainCoordinate(double x)
{
  /*
    Clamp a drawing coordinate into the range representable as a ssize_t so
    a later cast cannot overflow.
  */
  const double
    min_coordinate = (double) -SSIZE_MAX,
    max_coordinate = (double) SSIZE_MAX;

  if (x < min_coordinate)
    return(min_coordinate);
  if (x > max_coordinate)
    return(max_coordinate);
  return(x);
}
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
    };

  PointInfo
    p,
    point,
    q;

  register ssize_t
    i,
    x;

  ssize_t
    coordinates,
    y;

  /*
    Emit the vertices of a path primitive (or a one-line summary for
    point/color/alpha/image/text primitives) to the draw event log.
  */
  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  /*
    Path-style primitive: walk each subpath, logging every vertex.  p holds
    the subpath's first point, q the previously logged point.
  */
  coordinates=0;
  p=primitive_info[0].point;
  q.x=(-1.0);
  q.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    point=primitive_info[i].point;
    if (coordinates <= 0)
      {
        /* start of a new subpath */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " begin open (%.20g)",(double) coordinates);
        p=point;
      }
    /* (a redundant reassignment of point was removed here) */
    if ((fabs(q.x-point.x) >= MagickEpsilon) ||
        (fabs(q.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
    q=point;
    coordinates--;
    if (coordinates > 0)
      continue;
    /* subpath complete: closed if it ended where it began */
    if ((fabs(p.x-point.x) >= MagickEpsilon) ||
        (fabs(p.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
        (double) coordinates);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
        (double) coordinates);
  }
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickStatusType
    status;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Dispatch a single primitive (point, color, alpha, image, text, or path)
    to the appropriate renderer.  status accumulates failures bitwise so a
    failure anywhere yields MagickFalse at the end.
  */
  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " begin draw-primitive");
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
        draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
        draw_info->affine.tx,draw_info->affine.ty);
    }
  status=MagickTrue;
  /* a gray image cannot hold a non-gray fill or stroke; promote to sRGB */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
       (IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
    status=SetImageColorspace(image,sRGBColorspace,exception);
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
        exception);
      status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
        exception);
    }
  x=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.x-0.5));
  y=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.y-0.5));
  image_view=AcquireAuthenticCacheView(image,exception);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          /* set the alpha of the single pixel at (x,y) */
          PixelInfo
            pixel;

          register Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          /* set alpha on every pixel fuzzy-matching the color at (x,y) */
          MagickBooleanType
            sync;

          PixelInfo
            pixel,
            target;

          (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          /* flood fill the alpha channel from the seed point (x,y) */
          ChannelType
            channel_mask;

          PixelInfo
            target;

          (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              /* fill until the border color is reached */
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          (void) SetImageChannelMask(image,channel_mask);
          break;
        }
        case ResetMethod:
        {
          /* set alpha on every pixel of the image */
          MagickBooleanType
            sync;

          PixelInfo
            pixel;

          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ColorPrimitive:
    {
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          /* replace the single pixel at (x,y) with the fill color */
          PixelInfo
            pixel;

          register Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetPixelInfo(image,&pixel);
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelViaPixelInfo(image,&pixel,q);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          /* recolor every pixel fuzzy-matching the color at (x,y) */
          MagickBooleanType
            sync;

          PixelInfo
            pixel,
            target;

          (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          /* flood fill all channels from the seed point (x,y) */
          PixelInfo
            target;

          (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          break;
        }
        case ResetMethod:
        {
          /* recolor every pixel of the image with the fill color */
          MagickBooleanType
            sync;

          PixelInfo
            pixel;

          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ImagePrimitive:
    {
      AffineMatrix
        affine;

      char
        composite_geometry[MagickPathExtent];

      Image
        *composite_image,
        *composite_images;

      ImageInfo
        *clone_info;

      RectangleInfo
        geometry;

      ssize_t
        x1,
        y1;

      if (primitive_info->text == (char *) NULL)
        break;
      /* load the image: inline data: URI or a filename */
      clone_info=AcquireImageInfo();
      if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
        composite_images=ReadInlineImage(clone_info,primitive_info->text,
          exception);
      else
        {
          (void) CopyMagickString(clone_info->filename,primitive_info->text,
            MagickPathExtent);
          composite_images=ReadImage(clone_info,exception);
        }
      clone_info=DestroyImageInfo(clone_info);
      if (composite_images == (Image *) NULL)
        {
          status=0;
          break;
        }
      composite_image=RemoveFirstImageFromList(&composite_images);
      composite_images=DestroyImageList(composite_images);
      (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
        NULL,(void *) NULL);
      x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
      y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
      if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
          ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
        {
          /*
            Resize image.
          */
          (void) FormatLocaleString(composite_geometry,MagickPathExtent,
            "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          (void) TransformImage(&composite_image,(char *) NULL,
            composite_geometry,exception);
        }
      if (composite_image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
          exception);
      if (draw_info->alpha != OpaqueAlpha)
        (void) SetImageAlpha(composite_image,draw_info->alpha,exception);
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MagickPathExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      /* simple over-composite can honor the full affine transform */
      if ((draw_info->compose == OverCompositeOp) ||
          (draw_info->compose == SrcOverCompositeOp))
        (void) DrawAffineImage(image,composite_image,&affine,exception);
      else
        (void) CompositeImage(image,composite_image,draw_info->compose,
          MagickTrue,geometry.x,geometry.y,exception);
      composite_image=DestroyImage(composite_image);
      break;
    }
    case PointPrimitive:
    {
      PixelInfo
        fill_color;

      register Quantum
        *q;

      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (Quantum *) NULL)
        break;
      GetFillColor(draw_info,x,y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case TextPrimitive:
    {
      char
        geometry[MagickPathExtent];

      DrawInfo
        *clone_info;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status&=AnnotateImage(image,clone_info,exception);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    default:
    {
      double
        mid,
        scale;

      DrawInfo
        *clone_info;

      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
          (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
          (draw_info->stroke.alpha != (Quantum) TransparentAlpha))
        {
          /*
            Draw dash polygon.
          */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          /* bug fix: accumulate with &= so earlier failures are preserved */
          status&=DrawDashPolygon(draw_info,primitive_info,image,exception);
          break;
        }
      mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
      if ((mid > 1.0) &&
          ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
           (draw_info->stroke_pattern != (Image *) NULL)))
        {
          double
            x,
            y;

          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
          */
          closed_path=primitive_info[0].closed_subpath;
          i=(ssize_t) primitive_info[0].coordinates;
          x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
          y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
          if ((x < MagickEpsilon) && (y < MagickEpsilon))
            closed_path=MagickTrue;
          if ((((draw_info->linecap == RoundCap) ||
                (closed_path != MagickFalse)) &&
               (draw_info->linejoin == RoundJoin)) ||
              (primitive_info[i].primitive != UndefinedPrimitive))
            {
              /* bug fix: accumulate with &= to preserve earlier failures */
              status&=DrawPolygonPrimitive(image,draw_info,primitive_info,
                exception);
              break;
            }
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
          break;
        }
      status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (draw_info->compliance == SVGCompliance)
    {
      /* clear the masks installed at entry */
      status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
      status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
% MagickBooleanType DrawStrokePolygon(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  /*
    Render a round line cap at the given endpoint by rasterizing a tiny
    4-coordinate polygon (an epsilon-sized quad) centered on the point.
  */
  PrimitiveInfo
    cap_polygon[5];

  register ssize_t
    vertex;

  vertex=0;
  while (vertex < 4)
    cap_polygon[vertex++]=(*primitive_info);
  cap_polygon[0].coordinates=4;
  /* nudge three of the four vertices by epsilon to form a degenerate quad */
  cap_polygon[1].point.x+=2.0*MagickEpsilon;
  cap_polygon[2].point.x+=2.0*MagickEpsilon;
  cap_polygon[2].point.y+=2.0*MagickEpsilon;
  cap_polygon[3].point.y+=2.0*MagickEpsilon;
  cap_polygon[4].primitive=UndefinedPrimitive;
  return(DrawPolygonPrimitive(image,draw_info,cap_polygon,exception));
}
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  register const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon: each subpath's centerline is converted into a
    fillable outline polygon which is then rendered with the stroke color
    as the fill, honoring line cap/join attributes.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  /* clone: fill becomes the stroke color/pattern; stroke itself is disabled */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;  /* a lone point has no stroke outline */
    /* convert this subpath's centerline into an outline polygon (owned) */
    stroke_polygon=TraceStrokePolygon(image,draw_info,p);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    q=p+p->coordinates-1;  /* last vertex of this subpath */
    closed_path=p->closed_subpath;
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        /* open path: draw round caps on both endpoints */
        status&=DrawRoundLinecap(image,draw_info,p,exception);
        status&=DrawRoundLinecap(image,draw_info,q,exception);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  /*
    Initialize the affine matrix to the identity transform: unit scale
    factors, with rotation/shear and translation terms zeroed.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  *affine_matrix=(AffineMatrix) { .sx = 1.0, .sy = 1.0 };
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
%    o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.

    The draw_info structure is zeroed, populated with built-in defaults,
    then overridden from image_info: first a few direct fields (font,
    density, antialias, pointsize, border color, server name), then the
    string-valued image options ("fill", "stroke", "gravity", ...).
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* defaults: opaque black fill, fully transparent white stroke */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /* copy simple fields from the (cloned) image info */
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->border_color=clone_info->border_color;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  /* override defaults from image options, when present */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
      exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
      exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* "weight" is either a symbolic name or a bare number */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the binomial coefficient of (n,k), i.e. the number of
%  ways to choose k items from n.
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n:
%
% o k:
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  /*
    Compute the binomial coefficient C(n,k) as a double: accumulate the
    falling factorial n!/k!, then divide out (n-k)!.
  */
  double
    result;

  register ssize_t
    j;

  result=1.0;
  for (j=n; j > k; j--)
    result*=j;
  for (j=n-k; j >= 1; j--)
    result/=j;
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  /*
    Trace an arc as an ellipse centered on the midpoint of the segment
    from start to end, with radii spanning half the segment per axis.
  */
  PointInfo
    center,
    radius;

  center.x=0.5*(start.x+end.x);
  center.y=0.5*(start.y+end.y);
  radius.x=fabs(start.x-center.x);
  radius.y=fabs(start.y-center.y);
  return(TraceEllipse(mvg_info,center,radius,degrees));
}
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  MagickStatusType
    status;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  /*
    Trace an SVG-style elliptical arc given by its endpoints, radii,
    x-axis rotation, and the large-arc/sweep flags, by converting the
    endpoint parameterization to a center/angle form and emitting one
    cubic Bezier segment per quarter turn (or less).
  */
  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /* coincident endpoints: the arc degenerates to a single point */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    return(TracePoint(primitive_info,end));
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  /* zero radius: the arc degenerates to a straight line */
  if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
    return(TraceLine(primitive_info,start,end));
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  /* midpoint of the chord in the rotated coordinate frame */
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  /* delta > 1 means the radii are too small to span the chord: scale up */
  if (delta > 1.0)
    {
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  /* endpoints scaled into the unit-circle frame */
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  /* solve for the circle center; sign chosen by the sweep/large-arc flags */
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  /* alpha: start angle; theta: signed angular extent of the arc */
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /* one Bezier segment per (at most) quarter turn */
  arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
    MagickEpsilon))));
  status=MagickTrue;
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    /* gamma: standard control-point distance for a circular-arc Bezier */
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /* chain segments: first starts at 'start', later ones at prior end */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    /* map control points back from the unit-circle frame to user space */
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    /* pin the last segment exactly onto the requested endpoint */
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;
    status&=TraceBezier(mvg_info,4);
    if (status == 0)
      break;
    /* TraceBezier may reallocate the buffer; re-derive p from the offset */
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
    p+=p->coordinates;
  }
  if (status == 0)
    return(MagickFalse);
  /* rewind to the arc's first element and stamp the combined coordinates */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* propagate the primitive type backwards over all traced points */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
  const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients.

    Evaluate the Bezier curve whose control points already occupy the
    first number_coordinates slots at the current offset, replacing them
    with a polyline of sampled points.  The sampling density (quantum) is
    scaled by the largest coordinate spread between any two control
    points, capped at BezierQuantum.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=number_coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
  coefficients=(double *) AcquireQuantumMemory(number_coordinates,
    sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    {
      /* release whichever allocation succeeded before bailing out */
      if (points != (PointInfo *) NULL)
        points=(PointInfo *) RelinquishMagickMemory(points);
      if (coefficients != (double *) NULL)
        coefficients=(double *) RelinquishMagickMemory(coefficients);
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  control_points=quantum*number_coordinates;
  if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  /* CheckPrimitiveExtent may reallocate; re-derive the base pointer */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  /*
    Compute bezier points.

    Bernstein-basis evaluation: coefficients[] holds the binomial
    coefficients; each sample blends the control points with weights
    alpha = C(n-1,j) * w^j * (1-w)^(n-1-j).
  */
  end=primitive_info[number_coordinates-1].point;
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    if (TracePoint(p,points[i]) == MagickFalse)
      {
        points=(PointInfo *) RelinquishMagickMemory(points);
        coefficients=(double *) RelinquishMagickMemory(coefficients);
        return(MagickFalse);
      }
    p+=p->coordinates;
  }
  /* terminate the curve exactly on the final control point */
  if (TracePoint(p,end) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* propagate the primitive type backwards over all traced points */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
  return(MagickTrue);
}
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  /*
    A circle is an ellipse with equal radii: the radius is the distance
    from the center (start) to the perimeter point (end), swept through
    the full 0..360 degree range.
  */
  double
    radius;

  PointInfo
    degrees,
    offset;

  radius=hypot((double) (end.x-start.x),(double) (end.y-start.y));
  offset.x=(double) radius;
  offset.y=(double) radius;
  degrees.x=0.0;
  degrees.y=360.0;
  return(TraceEllipse(mvg_info,start,offset,degrees));
}
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
  const PointInfo radii,const PointInfo arc)
{
  double
    coordinates,
    delta,
    step,
    x,
    y;

  PointInfo
    angle,
    point;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Ellipses are just short segmented polys.

    Sample the parametric ellipse from arc.x to arc.y degrees at a step
    chosen from the larger radius (finer steps for larger ellipses).
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /* degenerate radius: emit nothing, succeed */
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(MagickTrue);
  delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
  angle.x=DegreesToRadians(arc.x);
  /* normalize the end angle so the sweep is non-negative */
  y=arc.y;
  while (y < arc.x)
    y+=360.0;
  angle.y=DegreesToRadians(y);
  coordinates=ceil((angle.y-angle.x)/step+1.0);
  if (coordinates > (double) SSIZE_MAX)
    {
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
    return(MagickFalse);
  /* CheckPrimitiveExtent may reallocate; re-derive the base pointer */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
    if (TracePoint(p,point) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  /* land exactly on the end angle (the loop stops short of it) */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* a full sweep ends where it starts: mark the subpath closed */
  x=fabs(primitive_info[0].point.x-
    primitive_info[primitive_info->coordinates-1].point.x);
  y=fabs(primitive_info[0].point.y-
    primitive_info[primitive_info->coordinates-1].point.y);
  if ((x < MagickEpsilon) && (y < MagickEpsilon))
    primitive_info->closed_subpath=MagickTrue;
  /* propagate the primitive type backwards over all traced points */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  /*
    Trace a line as a two-point primitive; a zero-length line collapses
    to a single point primitive instead.
  */
  MagickBooleanType
    degenerate;

  if (TracePoint(primitive_info,start) == MagickFalse)
    return(MagickFalse);
  degenerate=((fabs(start.x-end.x) < MagickEpsilon) &&
    (fabs(start.y-end.y) < MagickEpsilon)) ? MagickTrue : MagickFalse;
  if (degenerate != MagickFalse)
    {
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return(MagickTrue);
    }
  if (TracePoint(primitive_info+1,end) == MagickFalse)
    return(MagickFalse);
  primitive_info[1].primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}
static size_t TracePath(MVGInfo *mvg_info,const char *path,
  ExceptionInfo *exception)
{
  char
    *next_token,
    token[MagickPathExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  MagickBooleanType
    status;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  ssize_t
    subpath_offset;

  /*
    Parse an SVG-style path string (M/L/H/V/C/S/Q/T/A/Z commands, upper
    case absolute, lower case relative) into primitive points, returning
    the total number of coordinates traced or 0 on error.  'point' tracks
    the current position, 'start' the start of the current subpath, and
    'q' the next free slot; mvg_info->offset is advanced as points are
    emitted (the backing buffer may be reallocated, so q is re-derived
    from the offset after every helper call).
  */
  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;

        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;

        PointInfo
          arc = {0.0, 0.0};

        /*
          Elliptical arc: rx,ry rotation large-arc-flag sweep-flag x,y.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          arc.x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          arc.y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          angle=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          /* NOTE(review): the comma check below precedes the next
             GetNextToken and is then repeated after it; looks redundant
             but is preserved as-is — confirm against upstream intent. */
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bézier curve: three control points per segment; the
          current point supplies the fourth (first) control point.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /*
          Horizontal line to: only the x coordinate changes.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Move to: flush the current subpath's coordinate count, then
          start a new subpath at the given point.  Extra point pairs
          after the first are implicit line-tos per the SVG grammar.
        */
        if (mvg_info->offset != subpath_offset)
          {
            primitive_info=(*mvg_info->primitive_info)+subpath_offset;
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
            subpath_offset=mvg_info->offset;
          }
        i=0;
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          if (i == 0)
            start=point;  /* remember the subpath origin for 'Z' */
          i++;
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bézier curve: one control point plus endpoint; the
          current point supplies the first control point.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Smooth cubic Bézier curve: the first control point is the
          reflection of the previous segment's second control point
          (points[2]) about the current point (points[3]).
        */
        do
        {
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          /* no preceding curve: the reflection degenerates to the point */
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Smooth quadratic Bézier curve: the control point is the
          reflection of the previous control point (points[1]) about
          the current point (points[2]).
        */
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (status == MagickFalse)
            break;
          /* no preceding quadratic: reflection degenerates to the point */
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /*
          Vertical line to: only the y coordinate changes.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Close path: emit a point back at the subpath origin, mark the
          subpath closed, and begin a fresh subpath.
        */
        point=start;
        if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
          return(0);
        q=(*mvg_info->primitive_info)+mvg_info->offset;
        if (TracePoint(q,point) == MagickFalse)
          return(0);
        mvg_info->offset+=q->coordinates;
        q+=q->coordinates;
        primitive_info=(*mvg_info->primitive_info)+subpath_offset;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        primitive_info->closed_subpath=MagickTrue;
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        subpath_offset=mvg_info->offset;
        z_count++;
        break;
      }
      default:
      {
        /* unrecognized path command */
        ThrowPointExpectedException(token,exception);
        break;
      }
    }
  }
  if (status == MagickFalse)
    return(0);
  /* flush the final (possibly open) subpath */
  primitive_info=(*mvg_info->primitive_info)+subpath_offset;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  /* propagate the primitive type backwards over every traced point;
     multiple closed subpaths switch the fill method for correct holes */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  /* NOTE(review): dead store — q is not read again after this point */
  q=primitive_info;
  return(number_coordinates);
}
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  /*
    Trace an axis-aligned rectangle as a closed 5-point polygon:
    start -> (start.x,end.y) -> end -> (end.x,start.y) -> start.
  */
  PointInfo
    corner;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  q=primitive_info;
  if (TracePoint(q,start) == MagickFalse)
    return(MagickFalse);
  q+=q->coordinates;
  corner.x=start.x;
  corner.y=end.y;
  if (TracePoint(q,corner) == MagickFalse)
    return(MagickFalse);
  q+=q->coordinates;
  if (TracePoint(q,end) == MagickFalse)
    return(MagickFalse);
  q+=q->coordinates;
  corner.x=end.x;
  corner.y=start.y;
  if (TracePoint(q,corner) == MagickFalse)
    return(MagickFalse);
  q+=q->coordinates;
  if (TracePoint(q,start) == MagickFalse)
    return(MagickFalse);
  q+=q->coordinates;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  /* propagate the primitive type backwards over all traced points */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    q->primitive=primitive_info->primitive;
    q--;
  }
  return(MagickTrue);
}
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
  const PointInfo start,const PointInfo end,PointInfo arc)
{
  /*
    Trace a rounded rectangle as four quarter-ellipse corners plus a final
    point closing the subpath.  `arc` holds the corner radii; it is clamped
    so neither radius exceeds half the rectangle extent.  The primitive
    buffer may be reallocated by TraceEllipse/CheckPrimitiveExtent, so the
    working pointer p is re-derived from *mvg_info->primitive_info after
    every call that can grow it.
  */
  PointInfo
    degrees,
    point,
    segment;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  ssize_t
    offset;

  /* remember where this primitive starts so we can close/measure it later */
  offset=mvg_info->offset;
  segment.x=fabs(end.x-start.x);
  segment.y=fabs(end.y-start.y);
  if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
    {
      /* degenerate (zero-area) rectangle: emit an empty primitive */
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return(MagickTrue);
    }
  /* clamp corner radii to at most half the rectangle's width/height */
  if (arc.x > (0.5*segment.x))
    arc.x=0.5*segment.x;
  if (arc.y > (0.5*segment.y))
    arc.y=0.5*segment.y;
  /* corner 1: ellipse arc spanning 270..360 degrees */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* corner 2: 0..90 degrees */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* corner 3: 90..180 degrees */
  point.x=start.x+arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* corner 4: 180..270 degrees */
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* close the path back to the first traced point */
  if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  /* rewind the shared offset to the start of this primitive */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  /*
    Back-fill the primitive type; note the walk begins at the element one
    past the last traced point (p was advanced past it above).
  */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  /*
    Extend both ends of an open stroked path outward by `offset` along the
    path direction so a square line cap covers the stroke end.  The first
    and last vertices are moved; interior vertices are untouched.

    Fix: the original divided by `distance` unconditionally.  When every
    vertex coincides (dx == dy == 0), hypot() returns 0 and the division
    yields NaN/Inf coordinates that poison later rasterization.  Each end
    adjustment is now guarded by a MagickEpsilon check and skipped for a
    degenerate (zero-length) direction, leaving the endpoint unchanged.
  */
  double
    distance;

  register double
    dx,
    dy;

  register ssize_t
    i;

  ssize_t
    j;

  dx=0.0;
  dy=0.0;
  /* find the first vertex measurably distinct from vertex 0 */
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  distance=hypot((double) dx,(double) dy);
  if (distance >= MagickEpsilon)
    {
      /* push the head vertex out by `offset` along the (dx,dy) direction */
      primitive_info[0].point.x=(double) (primitive_info[i].point.x+
        dx*(distance+offset)/distance);
      primitive_info[0].point.y=(double) (primitive_info[i].point.y+
        dy*(distance+offset)/distance);
    }
  /* find the last vertex measurably distinct from the final vertex */
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
    dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  distance=hypot((double) dx,(double) dy);
  if (distance >= MagickEpsilon)
    {
      /* push the tail vertex out by `offset` along the (dx,dy) direction */
      primitive_info[number_vertices-1].point.x=(double)
        (primitive_info[j].point.x+dx*(distance+offset)/distance);
      primitive_info[number_vertices-1].point.y=(double)
        (primitive_info[j].point.y+dy*(distance+offset)/distance);
    }
  return(MagickTrue);
}
static PrimitiveInfo *TraceStrokePolygon(const Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  /*
    Build the outline polygon of a stroked path.  The vertices of
    primitive_info are walked once, emitting two offset point lists
    (path_p on one side, path_q on the other) at distance `mid` from the
    path, with segment joints filled according to draw_info->linejoin
    (bevel, miter, round).  The two lists are then concatenated (path_q
    reversed) into a single closed polygon.  Returns a newly allocated
    PrimitiveInfo array, or NULL on allocation failure; the caller owns
    and must relinquish the result.
  */
/*
  Grow path_p/path_q so at least `pad` more q-side points fit; on
  allocation failure releases everything and returns NULL from the
  enclosing function.  The ~max_strokes test detects size_t overflow of
  max_strokes+pad.
*/
#define CheckPathExtent(pad) \
  if ((ssize_t) (q+(pad)) >= (ssize_t) max_strokes) \
    { \
      if (~max_strokes < (pad)) \
        { \
          path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
          path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
        } \
      else \
        { \
          max_strokes+=(pad); \
          path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes, \
            sizeof(*path_p)); \
          path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes, \
            sizeof(*path_q)); \
        } \
      if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL)) \
        { \
          if (path_p != (PointInfo *) NULL) \
            path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
          if (path_q != (PointInfo *) NULL) \
            path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
          polygon_primitive=(PrimitiveInfo *) \
            RelinquishMagickMemory(polygon_primitive); \
          return((PrimitiveInfo *) NULL); \
        } \
    }

  /* pairs a value for the previous segment (p) and current segment (q) */
  typedef struct _LineSegment
  {
    double
      p,
      q;
  } LineSegment;

  double
    delta_theta,
    dot_product,
    mid,
    miterlimit;

  LineSegment
    dx = {0,0},
    dy = {0,0},
    inverse_slope = {0,0},
    slope = {0,0},
    theta = {0,0};

  MagickBooleanType
    closed_path;

  PointInfo
    box_p[5],     /* p-side offset corners: [0..1] prev segment, [2..3]
                     current segment, [4] miter intersection */
    box_q[5],     /* q-side offset corners, same layout */
    center,
    offset,
    *path_p,
    *path_q;

  PrimitiveInfo
    *polygon_primitive,
    *stroke_polygon;

  register ssize_t
    i;

  size_t
    arc_segments,
    max_strokes,
    number_vertices;

  ssize_t
    j,
    n,          /* index of the start vertex of the previous segment */
    p,          /* number of points emitted on the p side */
    q;          /* number of points emitted on the q side */

  /*
    Allocate paths.
  */
  number_vertices=primitive_info->coordinates;
  max_strokes=2*number_vertices+6*BezierQuantum+360;
  polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    number_vertices+2UL,sizeof(*polygon_primitive));
  if (polygon_primitive == (PrimitiveInfo *) NULL)
    return((PrimitiveInfo *) NULL);
  (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
    sizeof(*polygon_primitive));
  closed_path=primitive_info[0].closed_subpath;
  if (((draw_info->linejoin == RoundJoin) ||
       (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
    {
      /* duplicate the second vertex so the closing joint is also joined */
      polygon_primitive[number_vertices]=primitive_info[1];
      number_vertices++;
    }
  polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
  /*
    Compute the slope for the first line segment, p.
  */
  dx.p=0.0;
  dy.p=0.0;
  for (n=1; n < (ssize_t) number_vertices; n++)
  {
    dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
    dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
    if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
      break;
  }
  if (n == (ssize_t) number_vertices)
    {
      if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
        {
          /*
            Zero length subpath.
          */
          stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
            sizeof(*stroke_polygon));
          stroke_polygon[0]=polygon_primitive[0];
          stroke_polygon[0].coordinates=0;
          polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
            polygon_primitive);
          return(stroke_polygon);
        }
      n=(ssize_t) number_vertices-1L;
    }
  path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_p));
  if (path_p == (PointInfo *) NULL)
    {
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_q));
  if (path_q == (PointInfo *) NULL)
    {
      path_p=(PointInfo *) RelinquishMagickMemory(path_p);
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  /*
    Near-vertical and near-horizontal segments get a huge finite slope
    (+-1/MagickEpsilon) instead of dividing by ~0.
  */
  slope.p=0.0;
  inverse_slope.p=0.0;
  if (fabs(dx.p) < MagickEpsilon)
    {
      if (dx.p >= 0.0)
        slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
      else
        slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
    }
  else
    if (fabs(dy.p) < MagickEpsilon)
      {
        if (dy.p >= 0.0)
          inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      {
        slope.p=dy.p/dx.p;
        inverse_slope.p=(-1.0/slope.p);
      }
  /* mid: half the stroke width scaled by the current affine transform */
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  /* miterlimit is compared against squared distances below */
  miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
  if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
    (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
  /* perpendicular offset of magnitude `mid` for the first segment */
  offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
  offset.y=(double) (offset.x*inverse_slope.p);
  if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
    {
      box_p[0].x=polygon_primitive[0].point.x-offset.x;
      box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
      box_p[1].x=polygon_primitive[n].point.x-offset.x;
      box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
      box_q[0].x=polygon_primitive[0].point.x+offset.x;
      box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
      box_q[1].x=polygon_primitive[n].point.x+offset.x;
      box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
    }
  else
    {
      box_p[0].x=polygon_primitive[0].point.x+offset.x;
      box_p[0].y=polygon_primitive[0].point.y+offset.y;
      box_p[1].x=polygon_primitive[n].point.x+offset.x;
      box_p[1].y=polygon_primitive[n].point.y+offset.y;
      box_q[0].x=polygon_primitive[0].point.x-offset.x;
      box_q[0].y=polygon_primitive[0].point.y-offset.y;
      box_q[1].x=polygon_primitive[n].point.x-offset.x;
      box_q[1].y=polygon_primitive[n].point.y-offset.y;
    }
  /*
    Create strokes for the line join attribute: bevel, miter, round.
  */
  p=0;
  q=0;
  path_q[p++]=box_q[0];
  path_p[q++]=box_p[0];
  for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
  {
    /*
      Compute the slope for this line segment, q.
    */
    dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
    dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
    dot_product=dx.q*dx.q+dy.q*dy.q;
    if (dot_product < 0.25)
      continue;  /* skip segments shorter than half a pixel */
    slope.q=0.0;
    inverse_slope.q=0.0;
    if (fabs(dx.q) < MagickEpsilon)
      {
        if (dx.q >= 0.0)
          slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      if (fabs(dy.q) < MagickEpsilon)
        {
          if (dy.q >= 0.0)
            inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
          else
            inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
        }
      else
        {
          slope.q=dy.q/dx.q;
          inverse_slope.q=(-1.0/slope.q);
        }
    /* perpendicular offset for the current segment */
    offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
    offset.y=(double) (offset.x*inverse_slope.q);
    dot_product=dy.q*offset.x-dx.q*offset.y;
    if (dot_product > 0.0)
      {
        box_p[2].x=polygon_primitive[n].point.x-offset.x;
        box_p[2].y=polygon_primitive[n].point.y-offset.y;
        box_p[3].x=polygon_primitive[i].point.x-offset.x;
        box_p[3].y=polygon_primitive[i].point.y-offset.y;
        box_q[2].x=polygon_primitive[n].point.x+offset.x;
        box_q[2].y=polygon_primitive[n].point.y+offset.y;
        box_q[3].x=polygon_primitive[i].point.x+offset.x;
        box_q[3].y=polygon_primitive[i].point.y+offset.y;
      }
    else
      {
        box_p[2].x=polygon_primitive[n].point.x+offset.x;
        box_p[2].y=polygon_primitive[n].point.y+offset.y;
        box_p[3].x=polygon_primitive[i].point.x+offset.x;
        box_p[3].y=polygon_primitive[i].point.y+offset.y;
        box_q[2].x=polygon_primitive[n].point.x-offset.x;
        box_q[2].y=polygon_primitive[n].point.y-offset.y;
        box_q[3].x=polygon_primitive[i].point.x-offset.x;
        box_q[3].y=polygon_primitive[i].point.y-offset.y;
      }
    /* intersection of the two offset lines (miter point) in box_*[4] */
    if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
      {
        box_p[4]=box_p[1];
        box_q[4]=box_q[1];
      }
    else
      {
        box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
          box_p[3].y)/(slope.p-slope.q));
        box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
        box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
          box_q[3].y)/(slope.p-slope.q));
        box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
      }
    CheckPathExtent(6*BezierQuantum+360);
    /* cross product sign selects which side is the outer (joined) side */
    dot_product=dx.q*dy.p-dx.p*dy.q;
    if (dot_product <= 0.0)
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_q[q++]=box_q[1];
          path_q[q++]=box_q[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          /* approximate the round joint with an arc about the vertex */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
          theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
          if (theta.q < theta.p)
            theta.q+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_q[q].x=box_q[1].x;
          path_q[q].y=box_q[1].y;
          q++;
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_q[q].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_q[q].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            q++;
          }
          path_q[q++]=box_q[2];
          break;
        }
        default:
          break;
      }
    else
      /* mirror image of the branch above: the p side is the outer side */
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_p[p++]=box_p[1];
          path_p[p++]=box_p[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          center=polygon_primitive[n].point;
          theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
          theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
          if (theta.p < theta.q)
            theta.p+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_p[p++]=box_p[1];
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_p[p].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_p[p].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            p++;
          }
          path_p[p++]=box_p[2];
          break;
        }
        default:
          break;
      }
    /* current segment becomes the previous segment for the next joint */
    slope.p=slope.q;
    inverse_slope.p=inverse_slope.q;
    box_p[0]=box_p[2];
    box_p[1]=box_p[3];
    box_q[0]=box_q[2];
    box_q[1]=box_q[3];
    dx.p=dx.q;
    dy.p=dy.q;
    n=i;
  }
  path_p[p++]=box_p[1];
  path_q[q++]=box_q[1];
  /*
    Trace stroked polygon.
  */
  /* closed_path is 0 or 1 and is used arithmetically in the sizes below */
  stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
  if (stroke_polygon != (PrimitiveInfo *) NULL)
    {
      /* p-side points forward, then q-side points in reverse order */
      for (i=0; i < (ssize_t) p; i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_p[i];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[0].point;
          i++;
        }
      for ( ; i < (ssize_t) (p+q+closed_path); i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
          i++;
        }
      stroke_polygon[i]=polygon_primitive[0];
      stroke_polygon[i].point=stroke_polygon[0].point;
      i++;
      stroke_polygon[i].primitive=UndefinedPrimitive;
      stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
    }
  path_p=(PointInfo *) RelinquishMagickMemory(path_p);
  path_q=(PointInfo *) RelinquishMagickMemory(path_q);
  polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
  return(stroke_polygon);
}
|
GB_binop__second_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_int16)
// A.*B function (eWiseMult): GB (_AemultB_01__second_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__second_int16)
// A.*B function (eWiseMult): GB (_AemultB_03__second_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_int16)
// A*D function (colscale): GB (_AxD__second_int16)
// D*A function (rowscale): GB (_DxB__second_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__second_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__second_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_int16)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = bij
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = y ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_INT16 || GxB_NO_SECOND_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; for the SECOND op the result
// is C(i,j) = B(i,j).  Body comes from the dense ewise3 template.
GrB_Info GB (_Cdense_ewise3_noaccum__second_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, sliced by
// B_ek_slicing across B_ntasks tasks / B_nthreads threads.
GrB_Info GB (_Cdense_accumB__second_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed type-erased via p_bwork) into a
// dense matrix C.
GrB_Info GB (_Cdense_accumb__second_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable (the block above already returned); harmless
    // artifact of the code generator.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D; output values
// are written directly into C->x.
GrB_Info GB (_AxD__second_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D; output values are
// written directly into C->x.
GrB_Info GB (_DxB__second_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the SECOND_INT16 operator.  Workspace
// slicings are declared here and released via GB_FREE_WORK after the
// template runs.
GrB_Info GB (_AaddB__second_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B with the SECOND_INT16
// operator; work is pre-partitioned in TaskList.
GrB_Info GB (_AemultB_01__second_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for SECOND, so the flipxy argument is
// handled without a flipped template here.
GrB_Info GB (_AemultB_02__second_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full; sliced over M.
GrB_Info GB (_AemultB_03__second_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the output C is held in
// bitmap form.
GrB_Info GB (_AemultB_bitmap__second_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int16_t bij = GBX (Bx, p, false) ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = y ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
Matrix.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Matrix - Matrix stored and accessible by rows. Indices and values for
* the matrix nonzeros are copied into the matrix a row at a time, in any
* order using the MatrixGetRow function. The MatrixPutRow function returns
* a pointer to the indices and values of a row. The matrix has a set of
* row and column indices such that these indices begin at "beg" and end
* at "end", where 0 <= "beg" <= "end". In other words, the matrix indices
* have any nonnegative base value, and the base values of the row and column
* indices must agree.
*
*****************************************************************************/
#include "Matrix.h"
#include "Common.h"
#include "Numbering.h"
#include <memory.h>
#include <stdlib.h>
#ifdef __ve__
#include <sblas.h>
#endif
#define MAX_NZ_PER_ROW 1000
/*--------------------------------------------------------------------------
* MatrixCreate - Return (a pointer to) a matrix object.
*--------------------------------------------------------------------------*/
Matrix *MatrixCreate(MPI_Comm comm, HYPRE_Int beg_row, HYPRE_Int end_row) {
  /*
    Allocate a distributed Matrix object owning rows [beg_row, end_row] on
    this rank.  Row storage comes from the matrix's private Mem pool; the
    row ranges of all ranks are gathered so any global row number can be
    mapped to its owning processor.  Communication state is initialized
    empty.  Caller owns the returned matrix.
  */
  HYPRE_Int row_count, rank, nprocs;
  Matrix *A;

  A = hypre_TAlloc(Matrix, 1, HYPRE_MEMORY_HOST);
  A->comm = comm;
  A->beg_row = beg_row;
  A->end_row = end_row;
  A->mem = (Mem *)MemCreate();

  row_count = end_row - beg_row + 1;
  A->lens = (HYPRE_Int *)MemAlloc(A->mem, row_count * sizeof(HYPRE_Int));
  A->inds = (HYPRE_Int **)MemAlloc(A->mem, row_count * sizeof(HYPRE_Int *));
  A->vals = (HYPRE_Real **)MemAlloc(A->mem, row_count * sizeof(HYPRE_Real *));

  /* Gather every rank's row range so row numbers map to processors. */
  hypre_MPI_Comm_rank(comm, &rank);
  hypre_MPI_Comm_size(comm, &nprocs);
  A->beg_rows = (HYPRE_Int *)MemAlloc(A->mem, nprocs * sizeof(HYPRE_Int));
  A->end_rows = (HYPRE_Int *)MemAlloc(A->mem, nprocs * sizeof(HYPRE_Int));
  hypre_MPI_Allgather(&beg_row, 1, HYPRE_MPI_INT, A->beg_rows, 1,
                      HYPRE_MPI_INT, comm);
  hypre_MPI_Allgather(&end_row, 1, HYPRE_MPI_INT, A->end_rows, 1,
                      HYPRE_MPI_INT, comm);

  /* No communication pattern has been set up yet. */
  A->num_recv = 0;
  A->num_send = 0;
  A->flag = 0;
  A->t_flag = 0;
  A->recv_req = NULL;
  A->send_req = NULL;
  A->recv_req2 = NULL;
  A->send_req2 = NULL;
  A->statuses = NULL;
  A->sendind = NULL;
  A->sendbuf = NULL;
  A->recvbuf = NULL;
  A->numb = NULL;
  return A;
}
/*--------------------------------------------------------------------------
* MatrixCreateLocal - Return (a pointer to) a matrix object.
* The matrix created by this call is a local matrix, not a global matrix.
*--------------------------------------------------------------------------*/
/* Create a purely local (non-distributed) matrix object.
 * Like MatrixCreate but with a null communicator and no row-range
 * exchange, so beg_rows/end_rows stay NULL and MatrixRowPe must not be
 * used on it.
 *
 * Fix: MatrixCreate zeroes mat->flag and mat->t_flag but this function
 * did not, leaving them uninitialized; they are read (on VE builds) by
 * MatrixMatvec / MatrixMatvecTrans / MatrixDestroy, which is undefined
 * behavior. They are now initialized for consistency. */
Matrix *MatrixCreateLocal(HYPRE_Int beg_row, HYPRE_Int end_row) {
  HYPRE_Int num_rows;
  Matrix *mat = hypre_TAlloc(Matrix, 1, HYPRE_MEMORY_HOST);

  mat->comm = hypre_MPI_COMM_NULL;
  mat->beg_row = beg_row;
  mat->end_row = end_row;

  /* Per-row storage comes from the pool, freed en masse on destroy. */
  mat->mem = (Mem *)MemCreate();
  num_rows = mat->end_row - mat->beg_row + 1;
  mat->lens = (HYPRE_Int *)MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int));
  mat->inds = (HYPRE_Int **)MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int *));
  mat->vals =
      (HYPRE_Real **)MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Real *));

  /* Local matrix: no processor row-range map. */
  mat->beg_rows = NULL;
  mat->end_rows = NULL;

  /* No communication machinery; SBLAS handle flags start unset. */
  mat->num_recv = 0;
  mat->num_send = 0;
  mat->flag = 0;
  mat->t_flag = 0;
  mat->recv_req = NULL;
  mat->send_req = NULL;
  mat->recv_req2 = NULL;
  mat->send_req2 = NULL;
  mat->statuses = NULL;
  mat->sendind = NULL;
  mat->sendbuf = NULL;
  mat->recvbuf = NULL;
  mat->numb = NULL;
  return mat;
}
/*--------------------------------------------------------------------------
* MatrixDestroy - Destroy a matrix object "mat".
*--------------------------------------------------------------------------*/
/* Destroy a matrix object "mat": free the persistent MPI requests,
 * communication buffers, SBLAS handles (VE only), the row-storage pool,
 * and the Numbering.
 *
 * Fixes: (1) the sblas_destroy_matrix_handle calls were compiled on all
 * platforms, but sblas.h is only included under __ve__ — guard them;
 * (2) the handles are created lazily by the first matvec, so destroy
 * them only when flag/t_flag says they exist. */
void MatrixDestroy(Matrix *mat) {
  HYPRE_Int i;

  /* Free the persistent requests from SetupReceives/SetupSends.
     Note the crossed counts: recv_req2 holds num_send requests and
     send_req2 holds num_recv requests (they serve the transpose). */
  for (i = 0; i < mat->num_recv; i++)
    hypre_MPI_Request_free(&mat->recv_req[i]);
  for (i = 0; i < mat->num_send; i++)
    hypre_MPI_Request_free(&mat->send_req[i]);
  for (i = 0; i < mat->num_send; i++)
    hypre_MPI_Request_free(&mat->recv_req2[i]);
  for (i = 0; i < mat->num_recv; i++)
    hypre_MPI_Request_free(&mat->send_req2[i]);

  free(mat->recv_req);
  free(mat->send_req);
  free(mat->recv_req2);
  free(mat->send_req2);
  free(mat->statuses);
  free(mat->sendind);
  free(mat->sendbuf);
  free(mat->recvbuf);

#ifdef __ve__
  // a bug when ftrace is enabled
#ifndef _FTRACE
  /* Handles exist only after the first (transpose) matvec built them. */
  if (mat->flag)
    sblas_destroy_matrix_handle(mat->hnd);
  if (mat->t_flag)
    sblas_destroy_matrix_handle(mat->t_hnd);
#endif
#endif

  /* Releases lens/inds/vals/beg_rows/end_rows in one shot. */
  MemDestroy(mat->mem);
  if (mat->numb)
    NumberingDestroy(mat->numb);
  free(mat);
}
/*--------------------------------------------------------------------------
* MatrixSetRow - Set a row in a matrix. Only local rows can be set.
* Once a row has been set, it should not be set again, or else the
* memory used by the existing row will not be recovered until
* the matrix is destroyed. "row" is in global coordinate numbering.
*--------------------------------------------------------------------------*/
/* Store one row of the matrix. "row" is a *global* row number and must be
 * local to this processor. "ind"/"val" (length "len") are copied; passing
 * NULL for either leaves that part of the row storage uninitialized for
 * the caller to fill later. Setting a row twice leaks the old storage
 * into the pool until the matrix is destroyed (see banner comment). */
void MatrixSetRow(Matrix *mat, HYPRE_Int row, HYPRE_Int len, HYPRE_Int *ind,
                  HYPRE_Real *val) {
  /* convert global row number to local index */
  row -= mat->beg_row;
  mat->lens[row] = len;
  mat->inds[row] = (HYPRE_Int *)MemAlloc(mat->mem, len * sizeof(HYPRE_Int));
  mat->vals[row] = (HYPRE_Real *)MemAlloc(mat->mem, len * sizeof(HYPRE_Real));
  if (ind != NULL)
    hypre_TMemcpy(mat->inds[row], ind, HYPRE_Int, len, HYPRE_MEMORY_HOST,
                  HYPRE_MEMORY_HOST);
  if (val != NULL)
    hypre_TMemcpy(mat->vals[row], val, HYPRE_Real, len, HYPRE_MEMORY_HOST,
                  HYPRE_MEMORY_HOST);
}
/*--------------------------------------------------------------------------
* MatrixGetRow - Get a *local* row in a matrix.
*--------------------------------------------------------------------------*/
/* Return (by reference) the length, indices and values of *local* row
 * "row" (0-based). The returned pointers alias the matrix's internal
 * storage: no copies are made and callers must not free or outlive them. */
void MatrixGetRow(Matrix *mat, HYPRE_Int row, HYPRE_Int *lenp, HYPRE_Int **indp,
                  HYPRE_Real **valp) {
  *lenp = mat->lens[row];
  *indp = mat->inds[row];
  *valp = mat->vals[row];
}
/*--------------------------------------------------------------------------
* MatrixRowPe - Map "row" to a processor number.
*--------------------------------------------------------------------------*/
/* Map a global row number to the rank of the processor that owns it.
 * Scans the gathered ownership ranges; aborts if the row is out of
 * range for every processor. */
HYPRE_Int MatrixRowPe(Matrix *mat, HYPRE_Int row) {
  HYPRE_Int npes, p;
  HYPRE_Int *first = mat->beg_rows;
  HYPRE_Int *last = mat->end_rows;

  hypre_MPI_Comm_size(mat->comm, &npes);

  p = 0;
  while (p < npes) {
    if (first[p] <= row && row <= last[p])
      return p;
    p++;
  }

  hypre_printf("MatrixRowPe: could not map row %d.\n", row);
  PARASAILS_EXIT;
  return -1; /* for picky compilers */
}
/*--------------------------------------------------------------------------
* MatrixNnz - Return total number of nonzeros in preconditioner.
*--------------------------------------------------------------------------*/
/* Return the total number of stored nonzeros across all processors.
 * Collective over mat->comm (every rank must call). */
HYPRE_Int MatrixNnz(Matrix *mat) {
  HYPRE_Int i, local_nnz, global_nnz;
  HYPRE_Int nrows = mat->end_row - mat->beg_row + 1;

  /* Sum the stored lengths of our local rows... */
  local_nnz = 0;
  for (i = 0; i < nrows; i++)
    local_nnz += mat->lens[i];

  /* ...then reduce over all ranks. */
  hypre_MPI_Allreduce(&local_nnz, &global_nnz, 1, HYPRE_MPI_INT, hypre_MPI_SUM,
                      mat->comm);
  return global_nnz;
}
/*--------------------------------------------------------------------------
* MatrixPrint - Print a matrix to a file "filename". Each processor
* appends to the file in order, but the file is overwritten if it exists.
*--------------------------------------------------------------------------*/
/* Print the matrix to "filename" as "row col value" triplets, one entry
 * per line, in global numbering. Ranks take turns (serialized by the
 * barrier): rank 0 truncates the file, later ranks append, so rows come
 * out in global order. Column indices are translated back to global via
 * mat->numb, so this must run after MatrixComplete. Collective. */
void MatrixPrint(Matrix *mat, char *filename) {
  HYPRE_Int mype, npes, pe;
  HYPRE_Int row, i, len, *ind;
  HYPRE_Real *val;
  hypre_MPI_Comm_rank(mat->comm, &mype);
  hypre_MPI_Comm_size(mat->comm, &npes);
  for (pe = 0; pe < npes; pe++) {
    hypre_MPI_Barrier(mat->comm);
    if (mype == pe) {
      /* rank 0 overwrites any existing file; all others append */
      FILE *file = fopen(filename, (pe == 0 ? "w" : "a"));
      hypre_assert(file != NULL);
      for (row = 0; row <= mat->end_row - mat->beg_row; row++) {
        MatrixGetRow(mat, row, &len, &ind, &val);
        for (i = 0; i < len; i++)
          hypre_fprintf(file, "%d %d %.14e\n", row + mat->beg_row,
                        mat->numb->local_to_global[ind[i]], val[i]);
      }
      fclose(file);
    }
  }
}
/*--------------------------------------------------------------------------
* MatrixReadMaster - MatrixRead routine for processor 0. Internal use.
*--------------------------------------------------------------------------*/
/* MatrixRead worker for rank 0.
 * Pass 1: scan the whole triplet file, note the byte offset where each
 * other rank's first row starts, and Isend that offset to the rank so it
 * can fseek straight to its slice (see MatrixReadSlave). The file must
 * be sorted by row and each rank must own at least one row.
 * Pass 2: rewind and read rank 0's own rows into the matrix. */
static void MatrixReadMaster(Matrix *mat, char *filename) {
  MPI_Comm comm = mat->comm;
  HYPRE_Int mype, npes;
  FILE *file;
  HYPRE_Int ret;
  HYPRE_Int num_rows, curr_proc;
  HYPRE_Int row, col;
  HYPRE_Real value;
  hypre_longint offset;
  hypre_longint outbuf;
  HYPRE_Int curr_row;
  HYPRE_Int len;
  HYPRE_Int ind[MAX_NZ_PER_ROW];
  HYPRE_Real val[MAX_NZ_PER_ROW];
  char line[100];
  HYPRE_Int oldrow;
  hypre_MPI_Request request;
  hypre_MPI_Status status;
  hypre_MPI_Comm_size(mat->comm, &npes);
  hypre_MPI_Comm_rank(mat->comm, &mype);
  file = fopen(filename, "r");
  hypre_assert(file != NULL);
  /* header line carries the matrix dimensions; under EMSOLVE an extra
     per-row block follows the header and is skipped */
  if (fgets(line, 100, file) == NULL) {
    hypre_fprintf(stderr, "Error reading file.\n");
    PARASAILS_EXIT;
  }
#ifdef EMSOLVE
  ret = hypre_sscanf(line, "%*d %d %*d %*d", &num_rows);
  for (row = 0; row < num_rows; row++)
    hypre_fscanf(file, "%*d");
#else
  ret = hypre_sscanf(line, "%d %*d %*d", &num_rows);
#endif
  /* Pass 1: find and ship each rank's starting offset. */
  offset = ftell(file);
  hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
  request = hypre_MPI_REQUEST_NULL;
  curr_proc = 1; /* proc for which we are looking for the beginning */
  while (curr_proc < npes) {
    if (row == mat->beg_rows[curr_proc]) {
      /* complete the previous send before reusing outbuf/request */
      hypre_MPI_Wait(&request, &status);
      outbuf = offset;
      hypre_MPI_Isend(&outbuf, 1, hypre_MPI_LONG, curr_proc, 0, comm, &request);
      curr_proc++;
    }
    offset = ftell(file);
    oldrow = row;
    hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
    if (oldrow > row) {
      hypre_fprintf(stderr, "Matrix file is not sorted by rows.\n");
      PARASAILS_EXIT;
    }
  }
  /* Now read our own part */
  rewind(file);
  if (fgets(line, 100, file) == NULL) {
    hypre_fprintf(stderr, "Error reading file.\n");
    PARASAILS_EXIT;
  }
#ifdef EMSOLVE
  ret = hypre_sscanf(line, "%*d %d %*d %*d", &num_rows);
  for (row = 0; row < num_rows; row++)
    hypre_fscanf(file, "%*d");
#else
  ret = hypre_sscanf(line, "%d %*d %*d", &num_rows);
#endif
  /* Pass 2: accumulate triplets into ind/val until the row number
     changes, then commit the finished row. */
  ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
  curr_row = row;
  len = 0;
  while (ret != EOF && row <= mat->end_row) {
    if (row != curr_row) {
      /* store this row */
      MatrixSetRow(mat, curr_row, len, ind, val);
      curr_row = row;
      /* reset row pointer */
      len = 0;
    }
    if (len >= MAX_NZ_PER_ROW) {
      hypre_fprintf(stderr, "The matrix has exceeded %d\n", MAX_NZ_PER_ROW);
      hypre_fprintf(stderr, "nonzeros per row. Internal buffers must be\n");
      hypre_fprintf(stderr, "increased to continue.\n");
      PARASAILS_EXIT;
    }
    ind[len] = col;
    val[len] = value;
    len++;
    ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
  }
  /* Store the final row */
  if (ret == EOF || row > mat->end_row)
    MatrixSetRow(mat, mat->end_row, len, ind, val);
  fclose(file);
  /* ensure the last offset message was delivered before outbuf dies */
  hypre_MPI_Wait(&request, &status);
}
/*--------------------------------------------------------------------------
* MatrixReadSlave - MatrixRead routine for other processors. Internal use.
*--------------------------------------------------------------------------*/
/* MatrixRead worker for ranks != 0.
 * Receives from rank 0 the byte offset where this rank's rows begin
 * (computed by MatrixReadMaster), seeks there, and reads triplets until
 * the row number leaves this rank's range. */
static void MatrixReadSlave(Matrix *mat, char *filename) {
  MPI_Comm comm = mat->comm;
  hypre_MPI_Status status;
  HYPRE_Int mype;
  FILE *file;
  HYPRE_Int ret;
  HYPRE_Int row, col;
  HYPRE_Real value;
  hypre_longint offset;
  HYPRE_Int curr_row;
  HYPRE_Int len;
  HYPRE_Int ind[MAX_NZ_PER_ROW];
  HYPRE_Real val[MAX_NZ_PER_ROW];
  HYPRE_Real time0, time1;
  file = fopen(filename, "r");
  hypre_assert(file != NULL);
  hypre_MPI_Comm_rank(mat->comm, &mype);
  /* rank 0 tells us where our slice of the file starts */
  hypre_MPI_Recv(&offset, 1, hypre_MPI_LONG, 0, 0, comm, &status);
  time0 = hypre_MPI_Wtime();
  ret = fseek(file, offset, SEEK_SET);
  hypre_assert(ret == 0);
  /* accumulate triplets into ind/val until the row number changes,
     then commit the finished row */
  ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
  curr_row = row;
  len = 0;
  while (ret != EOF && row <= mat->end_row) {
    if (row != curr_row) {
      /* store this row */
      MatrixSetRow(mat, curr_row, len, ind, val);
      curr_row = row;
      /* reset row pointer */
      len = 0;
    }
    if (len >= MAX_NZ_PER_ROW) {
      hypre_fprintf(stderr, "The matrix has exceeded %d\n", MAX_NZ_PER_ROW);
      hypre_fprintf(stderr, "nonzeros per row. Internal buffers must be\n");
      hypre_fprintf(stderr, "increased to continue.\n");
      PARASAILS_EXIT;
    }
    ind[len] = col;
    val[len] = value;
    len++;
    ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
  }
  /* Store the final row */
  if (ret == EOF || row > mat->end_row)
    MatrixSetRow(mat, mat->end_row, len, ind, val);
  fclose(file);
  time1 = hypre_MPI_Wtime();
  hypre_printf("%d: Time for slave read: %f\n", mype, time1 - time0);
}
/*--------------------------------------------------------------------------
* MatrixRead - Read a matrix file "filename" from disk and store in the
* matrix "mat" which has already been created using MatrixCreate. The format
* assumes no empty (all-zero) rows, the rows are in order, and there will be at least
* one row per processor.
*--------------------------------------------------------------------------*/
/* Read the matrix file "filename" into "mat" (created by MatrixCreate).
 * Rank 0 scans the whole file and hands each other rank the offset of
 * its slice; the others seek and read their own rows. Finishes by
 * calling MatrixComplete. Collective over mat->comm. */
void MatrixRead(Matrix *mat, char *filename) {
  HYPRE_Int rank;
  HYPRE_Real t_start, t_end;

  hypre_MPI_Comm_rank(mat->comm, &rank);

  t_start = hypre_MPI_Wtime();
  if (rank != 0)
    MatrixReadSlave(mat, filename);
  else
    MatrixReadMaster(mat, filename);
  t_end = hypre_MPI_Wtime();

  hypre_printf("%d: Time for reading matrix: %f\n", rank, t_end - t_start);
  MatrixComplete(mat);
}
/*--------------------------------------------------------------------------
* RhsRead - Read a right-hand side file "filename" from disk and store in the
* location pointed to by "rhs". "mat" is needed to provide the partitioning
* information. The expected format is: a header line (n, nrhs) followed
* by n values. Also allows isis format, indicated by 1 HYPRE_Int in first
*line.
*--------------------------------------------------------------------------*/
/* Read a right-hand side from "filename" into "rhs" (length = this
 * rank's number of local rows). "mat" supplies the partitioning. Header
 * line is (n, nrhs); a header with a single integer indicates isis
 * format, whose data lines are "index value" pairs. Rank 0 reads the
 * whole file and sends each other rank its slice; the others just
 * receive. Collective over mat->comm.
 *
 * Fix: rank 0 never closed the file — fclose added. */
void RhsRead(HYPRE_Real *rhs, Matrix *mat, char *filename) {
  FILE *file;
  hypre_MPI_Status status;
  HYPRE_Int mype, npes;
  HYPRE_Int num_rows, num_local, pe, i, converted;
  HYPRE_Real *buffer = NULL;
  HYPRE_Int buflen = 0;
  char line[100];
  HYPRE_Int dummy;
  hypre_MPI_Comm_size(mat->comm, &npes);
  hypre_MPI_Comm_rank(mat->comm, &mype);
  num_local = mat->end_row - mat->beg_row + 1;
  /* Non-root ranks just wait for their slice from rank 0. */
  if (mype != 0) {
    hypre_MPI_Recv(rhs, num_local, hypre_MPI_REAL, 0, 0, mat->comm, &status);
    return;
  }
  file = fopen(filename, "r");
  hypre_assert(file != NULL);
  if (fgets(line, 100, file) == NULL) {
    hypre_fprintf(stderr, "Error reading file.\n");
    PARASAILS_EXIT;
  }
  /* converted == 1 (only one header integer) means isis format */
  converted = hypre_sscanf(line, "%d %d", &num_rows, &dummy);
  hypre_assert(num_rows == mat->end_rows[npes - 1]);
  /* Read own rows first */
  for (i = 0; i < num_local; i++)
    if (converted == 1) /* isis format */
      hypre_fscanf(file, "%*d %lf", &rhs[i]);
    else
      hypre_fscanf(file, "%lf", &rhs[i]);
  /* Stream each other rank's slice through a scratch buffer that grows
     to the largest slice encountered. */
  for (pe = 1; pe < npes; pe++) {
    num_local = mat->end_rows[pe] - mat->beg_rows[pe] + 1;
    if (buflen < num_local) {
      free(buffer);
      buflen = num_local;
      buffer = hypre_TAlloc(HYPRE_Real, buflen, HYPRE_MEMORY_HOST);
    }
    for (i = 0; i < num_local; i++)
      if (converted == 1) /* isis format */
        hypre_fscanf(file, "%*d %lf", &buffer[i]);
      else
        hypre_fscanf(file, "%lf", &buffer[i]);
    hypre_MPI_Send(buffer, num_local, hypre_MPI_REAL, pe, 0, mat->comm);
  }
  fclose(file); /* was leaked before */
  free(buffer);
}
/*--------------------------------------------------------------------------
* SetupReceives
*--------------------------------------------------------------------------*/
/* Build the receive side of the matvec communication pattern.
 * reqind[0..reqlen-1] lists, grouped by owning processor, the global row
 * indices this rank needs from others. For each owner we Isend the index
 * list (tag 444, matched by SetupSends' Irecv), record the request count
 * in outlist[pe], and create persistent requests: a receive (tag 555)
 * of the values for the forward matvec, and a send (tag 666) of the
 * same recvbuf slots for the transpose matvec. */
static void SetupReceives(Matrix *mat, HYPRE_Int reqlen, HYPRE_Int *reqind,
                          HYPRE_Int *outlist) {
  HYPRE_Int i, j, this_pe, mype;
  hypre_MPI_Request request;
  MPI_Comm comm = mat->comm;
  HYPRE_Int num_local = mat->end_row - mat->beg_row + 1;
  hypre_MPI_Comm_rank(comm, &mype);
  mat->num_recv = 0;
  /* Allocate recvbuf */
  /* recvbuf has num_local entries reserved at the front for the local
     part of x (used in matvec); remote values land behind them */
  mat->recvlen = reqlen; /* used for the transpose multiply */
  mat->recvbuf =
      hypre_TAlloc(HYPRE_Real, (reqlen + num_local), HYPRE_MEMORY_HOST);
  for (i = 0; i < reqlen; i = j) /* j is set below */
  {
    /* The processor that owns the row with index reqind[i] */
    this_pe = MatrixRowPe(mat, reqind[i]);
    /* Figure out other rows we need from this_pe */
    for (j = i + 1; j < reqlen; j++) {
      /* if row is on different pe */
      if (reqind[j] < mat->beg_rows[this_pe] ||
          reqind[j] > mat->end_rows[this_pe])
        break;
    }
    /* Request rows in reqind[i..j-1] */
    hypre_MPI_Isend(&reqind[i], j - i, HYPRE_MPI_INT, this_pe, 444, comm,
                    &request);
    hypre_MPI_Request_free(&request);
    /* Count of the number of indices needed from this_pe */
    outlist[this_pe] = j - i;
    hypre_MPI_Recv_init(&mat->recvbuf[i + num_local], j - i, hypre_MPI_REAL,
                        this_pe, 555, comm, &mat->recv_req[mat->num_recv]);
    hypre_MPI_Send_init(&mat->recvbuf[i + num_local], j - i, hypre_MPI_REAL,
                        this_pe, 666, comm, &mat->send_req2[mat->num_recv]);
    mat->num_recv++;
  }
}
/*--------------------------------------------------------------------------
* SetupSends
* This function will wait for all receives to complete.
*--------------------------------------------------------------------------*/
/* Build the send side of the matvec communication pattern.
 * inlist[pe] is the number of values processor pe needs from this rank
 * (produced by the Alltoall in MatrixComplete). Receives the actual
 * index lists (tag 444) sent by SetupReceives, creates persistent
 * requests — send of values (tag 555) for the forward matvec, receive
 * into sendbuf (tag 666) for the transpose — and converts the received
 * global indices to local ones.
 * This function will wait for all receives to complete. */
static void SetupSends(Matrix *mat, HYPRE_Int *inlist) {
  HYPRE_Int i, j, mype, npes;
  hypre_MPI_Request *requests;
  hypre_MPI_Status *statuses;
  MPI_Comm comm = mat->comm;
  hypre_MPI_Comm_rank(comm, &mype);
  hypre_MPI_Comm_size(comm, &npes);
  requests = hypre_TAlloc(hypre_MPI_Request, npes, HYPRE_MEMORY_HOST);
  statuses = hypre_TAlloc(hypre_MPI_Status, npes, HYPRE_MEMORY_HOST);
  /* Determine size of and allocate sendbuf and sendind */
  mat->sendlen = 0;
  for (i = 0; i < npes; i++)
    mat->sendlen += inlist[i];
  mat->sendbuf = NULL;
  mat->sendind = NULL;
  if (mat->sendlen) {
    mat->sendbuf = hypre_TAlloc(HYPRE_Real, mat->sendlen, HYPRE_MEMORY_HOST);
    mat->sendind = hypre_TAlloc(HYPRE_Int, mat->sendlen, HYPRE_MEMORY_HOST);
  }
  j = 0; /* running offset into sendbuf/sendind */
  mat->num_send = 0;
  for (i = 0; i < npes; i++) {
    if (inlist[i] != 0) {
      /* Post receive for the actual indices */
      hypre_MPI_Irecv(&mat->sendind[j], inlist[i], HYPRE_MPI_INT, i, 444, comm,
                      &requests[mat->num_send]);
      /* Set up the send */
      hypre_MPI_Send_init(&mat->sendbuf[j], inlist[i], hypre_MPI_REAL, i, 555,
                          comm, &mat->send_req[mat->num_send]);
      /* Set up the receive for the transpose */
      hypre_MPI_Recv_init(&mat->sendbuf[j], inlist[i], hypre_MPI_REAL, i, 666,
                          comm, &mat->recv_req2[mat->num_send]);
      mat->num_send++;
      j += inlist[i];
    }
  }
  hypre_MPI_Waitall(mat->num_send, requests, statuses);
  free(requests);
  free(statuses);
  /* convert global indices to local indices */
  /* these are all indices on this processor */
  for (i = 0; i < mat->sendlen; i++)
    mat->sendind[i] -= mat->beg_row;
}
/*--------------------------------------------------------------------------
* MatrixComplete
*--------------------------------------------------------------------------*/
/* Finish construction once all local rows have been set: build the
 * Numbering (local<->global column map), set up the persistent
 * send/receive machinery used by the matvecs, and convert stored column
 * indices to local numbering. Collective over mat->comm. */
void MatrixComplete(Matrix *mat) {
  HYPRE_Int mype, npes;
  HYPRE_Int *outlist, *inlist;
  HYPRE_Int row, len, *ind;
  HYPRE_Real *val;
  hypre_MPI_Comm_rank(mat->comm, &mype);
  hypre_MPI_Comm_size(mat->comm, &npes);
  /* one request slot per potential peer */
  mat->recv_req = hypre_TAlloc(hypre_MPI_Request, npes, HYPRE_MEMORY_HOST);
  mat->send_req = hypre_TAlloc(hypre_MPI_Request, npes, HYPRE_MEMORY_HOST);
  mat->recv_req2 = hypre_TAlloc(hypre_MPI_Request, npes, HYPRE_MEMORY_HOST);
  mat->send_req2 = hypre_TAlloc(hypre_MPI_Request, npes, HYPRE_MEMORY_HOST);
  mat->statuses = hypre_TAlloc(hypre_MPI_Status, npes, HYPRE_MEMORY_HOST);
  outlist = hypre_CTAlloc(HYPRE_Int, npes, HYPRE_MEMORY_HOST);
  inlist = hypre_CTAlloc(HYPRE_Int, npes, HYPRE_MEMORY_HOST);
  /* Create Numbering object */
  mat->numb = NumberingCreate(mat, PARASAILS_NROWS);
  /* external (non-local) indices start at position num_loc of
     local_to_global */
  SetupReceives(mat, mat->numb->num_ind - mat->numb->num_loc,
                &mat->numb->local_to_global[mat->numb->num_loc], outlist);
  /* outlist[pe] = #values we need from pe; the alltoall turns that into
     inlist[pe] = #values pe needs from us */
  hypre_MPI_Alltoall(outlist, 1, HYPRE_MPI_INT, inlist, 1, HYPRE_MPI_INT,
                     mat->comm);
  SetupSends(mat, inlist);
  free(outlist);
  free(inlist);
  /* Convert to local indices */
  for (row = 0; row <= mat->end_row - mat->beg_row; row++) {
    MatrixGetRow(mat, row, &len, &ind, &val);
    NumberingGlobalToLocal(mat->numb, len, ind, ind);
  }
}
/*--------------------------------------------------------------------------
* MatrixMatvec
* Can be done in place.
*--------------------------------------------------------------------------*/
/* y = A*x for the distributed matrix. Collective: every rank in
 * mat->comm must participate. Can be done in place (y aliasing x).
 * Requires MatrixComplete to have been called first. */
void MatrixMatvec(Matrix *mat, HYPRE_Real *x, HYPRE_Real *y) {
  HYPRE_Int row, i, j, len, *ind;
  HYPRE_Real *val, temp;
  HYPRE_Int num_local = mat->end_row - mat->beg_row + 1;
  /* Set up persistent communications */
  /* Assumes MatrixComplete has been called */
  /* Put components of x into the right outgoing buffers */
  for (i = 0; i < mat->sendlen; i++)
    mat->sendbuf[i] = x[mat->sendind[i]];
  hypre_MPI_Startall(mat->num_recv, mat->recv_req);
  hypre_MPI_Startall(mat->num_send, mat->send_req);
  /* Copy local part of x into top part of recvbuf */
  for (i = 0; i < num_local; i++)
    mat->recvbuf[i] = x[i];
  /* after this wait, recvbuf holds local x followed by remote values */
  hypre_MPI_Waitall(mat->num_recv, mat->recv_req, mat->statuses);
  // fprintf(stderr, "MatrixMatvec\n");
#ifdef __ve__
  /* NEC SX-Aurora path: on first use, flatten the row-wise storage into
     a CSR copy, hand it to SBLAS, and cache the handle for reuse. */
  HYPRE_Int s_ierr;
  if (!mat->flag) {
    // CSR data reordered according to multi-level scheduling
    HYPRE_Int *_ia;
    HYPRE_Int *_ja;
    HYPRE_Real *_data;
    // with multiple processes, it didn't create the handler ...
    HYPRE_Int nnz = 0, ncol = num_local;
    // HYPRE_Int n = mat->end_row - mat->beg_row + 1;
    // fprintf(stderr, "Matrix n: %d\t", n);
    /* NOTE(review): lens/inds/vals are indexed by *local* row everywhere
       else in this file (MatrixSetRow subtracts beg_row); iterating
       beg_row..end_row here looks correct only when beg_row == 0 —
       confirm for multi-process runs. */
    for (i = mat->beg_row; i <= mat->end_row; i++)
      nnz += mat->lens[i];
    // fprintf(stderr, "nnz: %d\n", nnz);
    _ia = (HYPRE_Int *)malloc(sizeof(HYPRE_Int) * (num_local + 1));
    _ja = (HYPRE_Int *)malloc(sizeof(HYPRE_Int) * nnz);
    _data = (HYPRE_Real *)malloc(sizeof(HYPRE_Real) * nnz);
    _ia[0] = 0;
    for (i = mat->beg_row; i <= mat->end_row; i++) {
      _ia[i - mat->beg_row + 1] = _ia[i - mat->beg_row] + mat->lens[i];
      HYPRE_Int *ja = mat->inds[i];
      HYPRE_Real *vals = mat->vals[i];
      for (j = 0; j < mat->lens[i]; j++) {
        _ja[_ia[i - mat->beg_row] + j] = ja[j];
        _data[_ia[i - mat->beg_row] + j] = vals[j];
        if (ja[j] > ncol)
          ncol = ja[j];
      }
    }
    /* column dimension spans local + received entries of recvbuf */
    ncol = mat->recvlen + num_local;
    s_ierr = sblas_create_matrix_handle_from_csr_rd(num_local, ncol, _ia, _ja,
                                                    _data, SBLAS_INDEXING_0,
                                                    SBLAS_GENERAL, &mat->hnd);
    // fprintf(stderr, "create error: %d\n", s_ierr);
    s_ierr = sblas_analyze_mv_rd(SBLAS_NON_TRANSPOSE, mat->hnd);
    // fprintf(stderr, "analysis error: %d\n", s_ierr);
    /* only mark the handle usable if analysis succeeded */
    if (SBLAS_OK == s_ierr)
      mat->flag = 1;
    /* SBLAS keeps its own copy; the CSR scratch arrays can go */
    free(_ia);
    free(_ja);
    free(_data);
  }
  /* do the multiply */
  s_ierr = sblas_execute_mv_rd(SBLAS_NON_TRANSPOSE, mat->hnd, 1.0, x,
                               0, y);
#else
  /* do the multiply */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(row, len, ind, val, temp, i) schedule(static)
#endif
  for (row = 0; row <= mat->end_row - mat->beg_row; row++) {
    MatrixGetRow(mat, row, &len, &ind, &val);
    temp = 0.0;
    for (i = 0; i < len; i++) {
      temp = temp + val[i] * mat->recvbuf[ind[i]];
    }
    y[row] = temp;
  }
#endif
  hypre_MPI_Waitall(mat->num_send, mat->send_req, mat->statuses);
}
/* y = A*x, always using the plain row-by-row kernel (no SBLAS/OpenMP).
 * Collective over mat->comm; requires MatrixComplete to have run.
 * Can be done in place. */
void MatrixMatvecSerial(Matrix *mat, HYPRE_Real *x, HYPRE_Real *y) {
  HYPRE_Int r, k, rowlen, *rowind;
  HYPRE_Real *rowval, sum;
  HYPRE_Int nlocal = mat->end_row - mat->beg_row + 1;

  /* Stage outgoing entries of x, then kick off the persistent
     send/receive pairs established by MatrixComplete. */
  for (k = 0; k < mat->sendlen; k++)
    mat->sendbuf[k] = x[mat->sendind[k]];
  hypre_MPI_Startall(mat->num_recv, mat->recv_req);
  hypre_MPI_Startall(mat->num_send, mat->send_req);

  /* Local entries of x occupy the front of recvbuf; remote values land
     behind them as the receives complete. */
  for (k = 0; k < nlocal; k++)
    mat->recvbuf[k] = x[k];
  hypre_MPI_Waitall(mat->num_recv, mat->recv_req, mat->statuses);

  /* One dot product per local row against the assembled recvbuf. */
  for (r = 0; r <= mat->end_row - mat->beg_row; r++) {
    MatrixGetRow(mat, r, &rowlen, &rowind, &rowval);
    sum = 0.0;
    for (k = 0; k < rowlen; k++)
      sum += rowval[k] * mat->recvbuf[rowind[k]];
    y[r] = sum;
  }

  hypre_MPI_Waitall(mat->num_send, mat->send_req, mat->statuses);
}
/*--------------------------------------------------------------------------
* MatrixMatvecTrans
* Can be done in place.
*--------------------------------------------------------------------------*/
/* y = A'*x (transpose matvec). Collective over mat->comm; requires
 * MatrixComplete. Can be done in place. Communication runs in the
 * reverse direction of MatrixMatvec: partial sums for remote rows are
 * scattered into the back of recvbuf, shipped via send_req2 (tag 666),
 * received into sendbuf via recv_req2, and folded into y through
 * sendind. */
void MatrixMatvecTrans(Matrix *mat, HYPRE_Real *x, HYPRE_Real *y) {
  HYPRE_Int row, i, j, len, *ind;
  HYPRE_Real *val;
  HYPRE_Int num_local = mat->end_row - mat->beg_row + 1;
  /* Set up persistent communications */
  /* Assumes MatrixComplete has been called */
  /* Post receives for local parts of the solution y */
  hypre_MPI_Startall(mat->num_send, mat->recv_req2);
#ifdef __ve__
  /* NEC SX-Aurora path: lazily build a CSR copy and a transpose-analyzed
     SBLAS handle on first call, then reuse it. */
  HYPRE_Int s_ierr;
  if (!mat->t_flag) {
    // CSR data reordered according to multi-level scheduling
    HYPRE_Int *_ia;
    HYPRE_Int *_ja;
    HYPRE_Real *_data;
    // with multiple processes, it didn't create the handler ...
    HYPRE_Int nnz = 0, ncol = num_local;
    /* NOTE(review): lens/inds/vals are indexed by *local* row everywhere
       else (MatrixSetRow subtracts beg_row); iterating beg_row..end_row
       here looks correct only when beg_row == 0 — confirm. */
    for (i = mat->beg_row; i <= mat->end_row; i++)
      nnz += mat->lens[i];
    _ia = (HYPRE_Int *)malloc(sizeof(HYPRE_Int) * (num_local + 1));
    _ja = (HYPRE_Int *)malloc(sizeof(HYPRE_Int) * nnz);
    _data = (HYPRE_Real *)malloc(sizeof(HYPRE_Real) * nnz);
    _ia[0] = 0;
    for (i = mat->beg_row; i <= mat->end_row; i++) {
      _ia[i - mat->beg_row + 1] = _ia[i - mat->beg_row] + mat->lens[i];
      HYPRE_Int *ja = mat->inds[i];
      HYPRE_Real *vals = mat->vals[i];
      for (j = 0; j < mat->lens[i]; j++) {
        _ja[_ia[i - mat->beg_row] + j] = ja[j];
        _data[_ia[i - mat->beg_row] + j] = vals[j];
        // if (ja[j] > ncol)
        //   ncol = ja[j];
      }
    }
    /* column dimension spans local + received entries of recvbuf */
    ncol = mat->recvlen + num_local;
    s_ierr = sblas_create_matrix_handle_from_csr_rd(
        num_local, ncol, _ia, _ja, _data, SBLAS_INDEXING_0, SBLAS_GENERAL,
        &mat->t_hnd); // handler
    s_ierr = sblas_analyze_mv_rd(SBLAS_TRANSPOSE, mat->t_hnd);
    /* only mark the handle usable if analysis succeeded */
    if (SBLAS_OK == s_ierr)
      mat->t_flag = 1;
    /* SBLAS keeps its own copy; the CSR scratch arrays can go */
    free(_ia);
    free(_ja);
    free(_data);
  }
  /* do the multiply */
  s_ierr =
      sblas_execute_mv_rd(SBLAS_TRANSPOSE, mat->t_hnd, 1.0, x, 0, mat->recvbuf);
#else
  /* initialize accumulator buffer to zero */
  for (i = 0; i < mat->recvlen + num_local; i++)
    mat->recvbuf[i] = 0.0;
  /* do the multiply: scatter val[i]*x[row] into column positions;
     recvbuf accumulates local sums at the front and sums destined for
     other processors behind them */
  for (row = 0; row <= mat->end_row - mat->beg_row; row++) {
    MatrixGetRow(mat, row, &len, &ind, &val);
    for (i = 0; i < len; i++) {
      mat->recvbuf[ind[i]] += val[i] * x[row];
    }
  }
#endif
  /* Now can send nonlocal parts of solution to other procs */
  hypre_MPI_Startall(mat->num_recv, mat->send_req2);
  /* copy local part of solution into y */
  for (i = 0; i < num_local; i++)
    y[i] = mat->recvbuf[i];
  /* alternatively, loop over a wait any */
  hypre_MPI_Waitall(mat->num_send, mat->recv_req2, mat->statuses);
  /* add all the incoming partial sums to y */
  for (i = 0; i < mat->sendlen; i++)
    y[mat->sendind[i]] += mat->sendbuf[i];
  hypre_MPI_Waitall(mat->num_recv, mat->send_req2, mat->statuses);
}
|
GB_unaryop__identity_int8_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int8_int32
// op(A') function: GB_tran__identity_int8_int32
// C type: int8_t
// A type: int32_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (int8_t) Ax [p] for p in [0, anz): elementwise identity with a
// typecast from int32_t to int8_t, parallelized over nthreads. Returns
// GrB_NO_VALUE when the operator/type combination is compiled out
// (GB_DISABLE), which tells the caller to fall back to the generic kernel.
// NOTE: this file is auto-generated — do not hand-edit the logic.
GrB_Info GB_unop__identity_int8_int32
(
    int8_t *restrict Cx,            // output array, length anz
    const int32_t *restrict Ax,     // input array, length anz
    int64_t anz,                    // number of entries
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;     // Cx [p] = op ((int8_t) Ax [p])
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (int8_t) A': transpose A, typecast int32_t -> int8_t, and apply the
// identity operator. The actual loop lives in the shared template
// GB_unaryop_transpose.c; GB_PHASE_2_OF_2 selects the phase that writes
// the output values (row counts having been computed in phase 1).
// Returns GrB_NO_VALUE when compiled out (GB_DISABLE).
// NOTE: this file is auto-generated — do not hand-edit the logic.
GrB_Info GB_tran__identity_int8_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,            // per-slice row counts from phase 1
    GBI_single_iterator Iter,       // iterator over the vectors of A
    const int64_t *restrict A_slice,
    int naslice                     // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
truedepsingleelement-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
// race condition due to a[i]= .. --> .. a[0]
#include <stdlib.h>
/* DataRaceBench kernel (the "-yes" suffix means a race IS present).
 * Vector length defaults to 1000 and may be overridden by argv[1]. */
int main (int argc, char* argv[])
{
  int len=1000;
  int i;
  if (argc>1) len = atoi(argv[1]);
  int a[len];   /* variable-length array on the stack */
  a[0] = 2;
  /* INTENTIONAL DATA RACE — the property under test, do not "fix":
     iteration i==0 writes a[i] (which is a[0]) while every other
     iteration concurrently reads a[0], with no ordering between them. */
#pragma omp parallel for
  for (i=0;i<len;i++)
    a[i]=a[i]+a[0];
  return 0;
}
|
serial_tree_learner.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#include <LightGBM/dataset.h>
#include <LightGBM/tree.h>
#include <LightGBM/tree_learner.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/utils/random.h>
#include <string>
#include <cmath>
#include <cstdio>
#include <memory>
#include <random>
#include <vector>
#include "data_partition.hpp"
#include "feature_histogram.hpp"
#include "leaf_splits.hpp"
#include "split_info.hpp"
#ifdef USE_GPU
// Use 4KBytes aligned allocator for ordered gradients and ordered hessians when GPU is enabled.
// This is necessary to pin the two arrays in memory and make transferring faster.
#include <boost/align/aligned_allocator.hpp>
#endif
using namespace json11;
namespace LightGBM {
/*!
* \brief Used for learning a tree by single machine
*/
class SerialTreeLearner: public TreeLearner {
public:
explicit SerialTreeLearner(const Config* config);
~SerialTreeLearner();
void Init(const Dataset* train_data, bool is_constant_hessian) override;
void ResetTrainingData(const Dataset* train_data) override;
void ResetConfig(const Config* config) override;
Tree* Train(const score_t* gradients, const score_t *hessians, bool is_constant_hessian,
Json& forced_split_json) override;
Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override;
Tree* FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred,
const score_t* gradients, const score_t* hessians) override;
  /*!
   * \brief Restrict training to a bagged subset of the data by forwarding
   *        the sampled indices to the data partition.
   * \param used_indices Indices of the sampled rows
   * \param num_data Number of sampled rows
   */
  void SetBaggingData(const data_size_t* used_indices, data_size_t num_data) override {
    data_partition_->SetUsedDataIndices(used_indices, num_data);
  }
  /*!
   * \brief Add this tree's leaf outputs to the running scores of the rows
   *        that landed in each leaf.
   * \param tree Trained tree whose leaf values are added
   * \param out_score Per-row score array updated in place
   */
  void AddPredictionToScore(const Tree* tree, double* out_score) const override {
    // Nothing to add for a single-leaf tree.
    if (tree->num_leaves() <= 1) { return; }
    CHECK(tree->num_leaves() <= data_partition_->num_leaves());
    // Leaves partition the rows, so iterating leaves in parallel writes
    // disjoint entries of out_score — no race.
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < tree->num_leaves(); ++i) {
      double output = static_cast<double>(tree->LeafOutput(i));
      data_size_t cnt_leaf_data = 0;
      auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
      for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
        out_score[tmp_idx[j]] += output;
      }
    }
  }
void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, const double* prediction,
data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const override;
void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, double prediction,
data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const override;
protected:
/*!
* \brief Some initial works before training
*/
virtual void BeforeTrain();
/*!
* \brief Some initial works before FindBestSplit
*/
virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf);
virtual void FindBestSplits();
virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);
virtual void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);
/*!
* \brief Partition tree and data according best split.
* \param tree Current tree, will be splitted on this function.
* \param best_leaf The index of leaf that will be splitted.
* \param left_leaf The index of left leaf after splitted.
* \param right_leaf The index of right leaf after splitted.
*/
virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf);
/* Force splits with forced_split_json dict and then return num splits forced.*/
virtual int32_t ForceSplits(Tree* tree, Json& forced_split_json, int* left_leaf,
int* right_leaf, int* cur_depth,
bool *aborted_last_force_split);
/*!
* \brief Get the number of data in a leaf
* \param leaf_idx The index of leaf
* \return The number of data in the leaf_idx leaf
*/
inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;
double CalculateOndemandCosts(int feature_index, int leaf_index);
/*! \brief number of data */
data_size_t num_data_;
/*! \brief number of features */
int num_features_;
/*! \brief training data */
const Dataset* train_data_;
/*! \brief gradients of current iteration */
const score_t* gradients_;
/*! \brief hessians of current iteration */
const score_t* hessians_;
/*! \brief training data partition on leaves */
std::unique_ptr<DataPartition> data_partition_;
/*! \brief used for generate used features */
Random random_;
/*! \brief used for sub feature training, is_feature_used_[i] = false means don't used feature i */
std::vector<int8_t> is_feature_used_;
/*! \brief pointer to histograms array of parent of current leaves */
FeatureHistogram* parent_leaf_histogram_array_;
/*! \brief pointer to histograms array of smaller leaf */
FeatureHistogram* smaller_leaf_histogram_array_;
/*! \brief pointer to histograms array of larger leaf */
FeatureHistogram* larger_leaf_histogram_array_;
/*! \brief store best split points for all leaves */
std::vector<SplitInfo> best_split_per_leaf_;
/*! \brief store best split per feature for all leaves */
std::vector<SplitInfo> splits_per_leaf_;
/*! \brief stores best thresholds for all feature for smaller leaf */
std::unique_ptr<LeafSplits> smaller_leaf_splits_;
/*! \brief stores best thresholds for all feature for larger leaf */
std::unique_ptr<LeafSplits> larger_leaf_splits_;
std::vector<int> valid_feature_indices_;
#ifdef USE_GPU
/*! \brief gradients of current iteration, ordered for cache optimized, aligned to 4K page */
std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_;
/*! \brief hessians of current iteration, ordered for cache optimized, aligned to 4K page */
std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_;
#else
/*! \brief gradients of current iteration, ordered for cache optimized */
std::vector<score_t> ordered_gradients_;
/*! \brief hessians of current iteration, ordered for cache optimized */
std::vector<score_t> ordered_hessians_;
#endif
/*! \brief Store ordered bin */
std::vector<std::unique_ptr<OrderedBin>> ordered_bins_;
/*! \brief True if has ordered bin */
bool has_ordered_bin_ = false;
/*! \brief is_data_in_leaf_[i] != 0 means i-th data is marked */
std::vector<char> is_data_in_leaf_;
/*! \brief used to cache historical histogram to speed up*/
HistogramPool histogram_pool_;
/*! \brief config of tree learner*/
const Config* config_;
int num_threads_;
std::vector<int> ordered_bin_indices_;
bool is_constant_hessian_;
std::vector<bool> feature_used;
std::vector<uint32_t> feature_used_in_data;
};
// Number of data rows currently assigned to leaf `leaf_idx`; a negative
// index denotes "no leaf" and yields zero.
inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leaf_idx) const {
  return (leaf_idx >= 0) ? data_partition_->leaf_count(leaf_idx) : 0;
}
} // namespace LightGBM
#endif // LightGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
|
workspace.h | #ifndef Workspace_H
#define Workspace_H
#include "logger.h"
#include "matrix.h"
#include "Printer.h"
namespace puma {
class Workspace
{
public:

    // --- Start Constructors --- //
    //
    // Ownership rules:
    //  * The Printer is always allocated here and owned (myPrinter == true)
    //    until setPrinter() installs a caller-supplied one.
    //  * The Logger is either allocated here (owned, myLogger == true) or
    //    borrowed from the caller via a Logger* (myLogger == false).
    //  * The destructor deletes only owned objects.
    //
    // Bug fix: the constructors taking `bool logBool` allocate their own
    // Logger but previously set myLogger = false, so the destructor never
    // freed it (memory leak). They now retain ownership (myLogger keeps
    // its default of true).
    //
    // NOTE(review): the class holds raw owning pointers and relies on the
    // compiler-generated copy operations; copying a Workspace would double
    // delete log/printer. Confirm copies never happen or add copy control.

    Workspace(long x, long y, long z, short val, double voxelLength) {
        log = new puma::Logger();
        matrix.resize(x,y,z,val);
        log->emptyLog();
        this->voxelLength = voxelLength;
        printer = new puma::Printer();
    }
    Workspace(long x, long y, long z, double voxelLength) {
        log = new puma::Logger();
        matrix.resize(x,y,z,0);
        log->emptyLog();
        this->voxelLength = voxelLength;
        printer = new puma::Printer();
    }
    explicit Workspace(double voxelLength) {
        log = new puma::Logger();
        matrix.resize(0,0,0,0);
        log->emptyLog();
        this->voxelLength = voxelLength;
        printer = new puma::Printer();
    }
    Workspace() {
        log = new puma::Logger();
        matrix.resize(0,0,0,0);
        log->emptyLog();
        this->voxelLength = 1e-6;
        printer = new puma::Printer();
    }
    // NOTE(review): copies only the matrix; voxelLength is reset to the
    // 1e-6 default rather than copied from *other — confirm intended.
    explicit Workspace(Workspace *other) {
        log = new puma::Logger();
        matrix.copy(&other->matrix);
        log->emptyLog();
        this->voxelLength = 1e-6;
        printer = new puma::Printer();
    }
    explicit Workspace(const puma::Vec3<long>& shape) {
        log = new puma::Logger();
        matrix.resize(shape.x,shape.y,shape.z,0);
        log->emptyLog();
        this->voxelLength = 1e-6;
        printer = new puma::Printer();
    }

    // Borrowed-logger constructors: the caller keeps ownership of *otherLog.
    Workspace(long x, long y, long z, short val, double voxelLength, Logger *otherLog) {
        log = otherLog;
        matrix.resize(x,y,z,val);
        this->voxelLength = voxelLength;
        myLogger = false;
        printer = new puma::Printer();
    }
    Workspace(long x, long y, long z, double voxelLength, Logger *otherLog) {
        log = otherLog;
        matrix.resize(x,y,z,0);
        this->voxelLength = voxelLength;
        myLogger = false;
        printer = new puma::Printer();
    }
    Workspace(double voxelLength, Logger *otherLog) {
        log = otherLog;
        matrix.resize(0,0,0,0);
        this->voxelLength = voxelLength;
        myLogger = false;
        printer = new puma::Printer();
    }
    explicit Workspace(Logger *otherLog) {
        log = otherLog;
        matrix.resize(0,0,0,0);
        this->voxelLength = 1e-6;
        myLogger = false;
        printer = new puma::Printer();
    }
    Workspace(Workspace *other, Logger *otherLog) {
        log = otherLog;
        matrix.copy(&other->matrix);
        this->voxelLength = 1e-6;
        myLogger = false;
        printer = new puma::Printer();
    }
    Workspace(const puma::Vec3<long>& shape, Logger *otherLog) {
        log = otherLog;
        matrix.resize(shape.x,shape.y,shape.z,0);
        this->voxelLength = 1e-6;
        myLogger = false;
        printer = new puma::Printer();
    }

    // Owned-logger constructors: logBool configures the Logger; the Logger
    // allocated here is owned and released by the destructor (leak fix:
    // these no longer clear myLogger).
    Workspace(long x, long y, long z, short val, double voxelLength, bool logBool) {
        log = new puma::Logger(logBool);
        matrix.resize(x,y,z,val);
        log->emptyLog();
        this->voxelLength = voxelLength;
        printer = new puma::Printer();
    }
    Workspace(long x, long y, long z, double voxelLength, bool logBool) {
        log = new puma::Logger(logBool);
        matrix.resize(x,y,z,0);
        log->emptyLog();
        this->voxelLength = voxelLength;
        printer = new puma::Printer();
    }
    Workspace(double voxelLength, bool logBool) {
        log = new puma::Logger(logBool);
        matrix.resize(0,0,0,0);
        log->emptyLog();
        this->voxelLength = voxelLength;
        printer = new puma::Printer();
    }
    explicit Workspace(bool logBool) {
        log = new puma::Logger(logBool);
        matrix.resize(0,0,0,0);
        log->emptyLog();
        this->voxelLength = 1e-6;
        printer = new puma::Printer();
    }
    Workspace(Workspace *other, bool logBool) {
        log = new puma::Logger(logBool);
        matrix.copy(&other->matrix);
        log->emptyLog();
        this->voxelLength = 1e-6;
        printer = new puma::Printer();
    }
    Workspace(const puma::Vec3<long>& shape, bool logBool) {
        log = new puma::Logger(logBool);
        matrix.resize(shape.x,shape.y,shape.z,0);
        log->emptyLog();
        this->voxelLength = 1e-6;
        printer = new puma::Printer();
    }
    Workspace(const puma::Vec3<long>& shape, double voxelLength, bool logBool) {
        log = new puma::Logger(logBool);
        matrix.resize(shape.x,shape.y,shape.z,0);
        log->emptyLog();
        this->voxelLength = voxelLength;
        printer = new puma::Printer();
    }

    // Release only the objects this Workspace owns.
    ~Workspace() {
        if(myLogger) { delete log; }
        if(myPrinter) { delete printer; }
    }
    // --- End Constructors --- //

    // --- Start Variables --- //
    puma::Matrix<short> matrix;   // 3-D voxel grid of material values
    puma::Logger *log;            // logging sink (owned iff myLogger)
    puma::Printer *printer;       // output sink (owned iff myPrinter)
    bool myPrinter{true};         // true when this object owns `printer`
    bool myLogger{true};          // true when this object owns `log`
    double voxelLength;           // edge length of one voxel
    // --- End Variables --- //

    // --- Start Functions --- //

    // Reset to an empty 0x0x0 workspace with a fresh log and voxel length.
    void newWorkspace(double voxelLength) {
        matrix.resize(0,0,0,0);
        log->emptyLog();
        this->voxelLength = voxelLength;
    }

    // Install a caller-owned printer. Bug fix: delete the previously owned
    // printer first so it is not leaked.
    void setPrinter(puma::Printer *print) {
        std::cout << "Printer changed to user input" << std::endl;
        if(myPrinter) { delete printer; }
        printer = print;
        myPrinter = false;
    }

    // Return to a default, owned printer. Bug fix: free the previous
    // printer when this object owned it, instead of leaking it.
    void newPrinter() {
        std::cout << "Printer returned to default" << std::endl;
        if(myPrinter) { delete printer; }
        printer = new puma::Printer();
        myPrinter = true;
    }

    // Element access delegates to the underlying matrix.
    short operator() (long i, long j, long k) { return matrix(i,j,k); }
    short& at(long i) { return matrix.at(i); }
    short& at(long i, long j, long k) { return matrix.at(i,j,k); }
    short& at_safe(long i) { return matrix.at_safe(i); }
    short& at_safe(long i, long j, long k) { return matrix.at_safe(i,j,k); }

    // Size queries (several aliases kept for API compatibility).
    long size() { return matrix.size(); }
    long X() { return matrix.X(); }
    long Y() { return matrix.Y(); }
    long Z() { return matrix.Z(); }
    puma::Vec3<long> shape() { return puma::Vec3<long>(matrix.X(),matrix.Y(),matrix.Z()); }
    puma::Vec3<long> getShape() { return puma::Vec3<long>(matrix.X(),matrix.Y(),matrix.Z()); }
    long getLength() { return matrix.size(); }
    long getSize() { return matrix.size(); }
    long getX() { return matrix.X(); }
    long getY() { return matrix.Y(); }
    long getZ() { return matrix.Z(); }

    // Simple statistics over all voxels.
    short min() { return matrix.min(); }
    short max() { return matrix.max(); }
    double average() { return matrix.average(); }

    // Crop to the inclusive sub-box [x1,x2]x[y1,y2]x[z1,z2].
    bool crop(long x1, long x2, long y1, long y2, long z1, long z2) {
        return matrix.crop(x1,x2,y1,y2,z1,z2);
    }

    // Resize to X*Y*Z, zero-filled; rejects non-positive dimensions.
    void setSize(long X, long Y, long Z) {
        if( !( X>0 && Y>0 && Z>0 ) ) {
            std::cout << "Invalid size. X, Y, and Z must be >0" << std::endl;
            return;
        }
        matrix.resize(X,Y,Z,0);
    }
    void resize(long X, long Y, long Z) {
        if( !( X>0 && Y>0 && Z>0 ) ) {
            std::cout << "Invalid size. X, Y, and Z must be >0" << std::endl;
            return;
        }
        matrix.resize(X,Y,Z,0);
    }

    // Replace every voxel whose value lies inside [cutoff.first,
    // cutoff.second] with `identifier` (must be in [0, 1000]).
    void setMaterialID(puma::Cutoff cutoff, int identifier) {
        if(identifier < 0) {
            return;
        }
        if(identifier > 1000) {
            return;
        }
        int X = (int)matrix.X();
        int Y = (int)matrix.Y();
        int Z = (int)matrix.Z();
#pragma omp parallel for
        for(long i=0; i<X; i++) {
            for(long j=0; j<Y; j++) {
                for(long k=0; k<Z; k++) {
                    short value = matrix(i,j,k);
                    if(value <= cutoff.second && value >= cutoff.first) {
                        matrix(i,j,k) = identifier;
                    }
                }
            }
        }
    }

    // Same as above, but the cutoff test reads voxels from *other while the
    // write goes to this workspace (the two must share dimensions).
    void setMaterialID(Workspace *other, puma::Cutoff cutoff, int identifier) {
        if(identifier < 0) {
            return;
        }
        if(identifier > 1000) {
            return;
        }
        int X = (int)matrix.X();
        int Y = (int)matrix.Y();
        int Z = (int)matrix.Z();
#pragma omp parallel for
        for(long i=0; i<X; i++) {
            for(long j=0; j<Y; j++) {
                for(long k=0; k<Z; k++) {
                    short value = other->matrix(i,j,k);
                    if(value <= cutoff.second && value >= cutoff.first) {
                        matrix(i,j,k) = identifier;
                    }
                }
            }
        }
    }
    // --- End Functions --- //
};
}
#endif // Workspace
|
GxB_Type_name.c | //------------------------------------------------------------------------------
// GxB_Type_name: return the name of a type
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
GrB_Info GxB_Type_name // return the name of a GraphBLAS type
(
char *type_name, // name of the type (char array of size at least
// GxB_MAX_NAME_LEN, owned by the user application).
GrB_Type type
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GB_WHERE1 ("GxB_Type_name (type_name, type)") ;
GB_RETURN_IF_NULL (type_name) ;
GB_RETURN_IF_NULL_OR_FAULTY (type) ;
//--------------------------------------------------------------------------
// return the type_name
//--------------------------------------------------------------------------
// Copies the full GxB_MAX_NAME_LEN bytes (not just up to the NUL), so the
// caller's buffer must be at least that large, as documented above.
memcpy (type_name, type->name, GxB_MAX_NAME_LEN) ;
#pragma omp flush
return (GrB_SUCCESS) ;
}
|
trmm.c | /**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/* trmm.c: this file is part of PolyBench/C */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
#include "trmm.h"
/* Array initialization. */
/* Fill A with a deterministic unit lower-triangular matrix and B with
 * deterministic values in [0,1); alpha is fixed at 1.5. */
static
void init_array(int m, int n,
                DATA_TYPE *alpha,
                DATA_TYPE POLYBENCH_2D(A, M, M, m, m),
                DATA_TYPE POLYBENCH_2D(B, M, N, m, n))
{
  int row, col;

  *alpha = 1.5;
  for (row = 0; row < m; row++) {
    /* strictly-lower part of A */
    for (col = 0; col < row; col++)
      A[row][col] = (DATA_TYPE)((row + col) % m) / m;
    /* unit diagonal */
    A[row][row] = 1.0;
    for (col = 0; col < n; col++)
      B[row][col] = (DATA_TYPE)((n + (row - col)) % n) / n;
  }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump every element of B (live-out data) so dead-code elimination cannot
 * remove the kernel; also usable to verify output correctness. */
static
void print_array(int m, int n,
                 DATA_TYPE POLYBENCH_2D(B, M, N, m, n))
{
  int r, c;

  POLYBENCH_DUMP_START;
  POLYBENCH_DUMP_BEGIN("B");
  for (r = 0; r < m; r++) {
    for (c = 0; c < n; c++) {
      /* break the dump into lines of 20 values */
      if ((r * m + c) % 20 == 0)
        fprintf(POLYBENCH_DUMP_TARGET, "\n");
      fprintf(POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, B[r][c]);
    }
  }
  POLYBENCH_DUMP_END("B");
  POLYBENCH_DUMP_FINISH;
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
// Triangular matrix multiply: for each row i, B[i][j] accumulates
// A[k][i]*B[k][j] over k > i and is then scaled by alpha. With A unit
// lower-triangular (diagonal = 1, set by init_array) this computes
// B := alpha * A^T * B row by row.
//
// The outer i-loop must stay sequential: row i reads rows k > i of B
// before those rows are themselves updated at later i iterations. Only
// the independent j columns are parallelized; firstprivate gives each
// thread its own copy of the loop-invariant scalars and the A pointer.
static
void kernel_trmm(int m, int n,
DATA_TYPE alpha,
DATA_TYPE POLYBENCH_2D(A, M, M, m, m),
DATA_TYPE POLYBENCH_2D(B, M, N, m, n))
{
int i, j, k;
for (i = 0; i < _PB_M; i++)
{
#pragma omp parallel for default(shared) private(j, k) firstprivate(n, i, m, alpha, A)
for (j = 0; j < _PB_N; j++)
{
for (k = i + 1; k < _PB_M; k++)
B[i][j] += A[k][i] * B[k][j];
B[i][j] = alpha * B[i][j];
}
}
}
// PolyBench driver: allocate, initialize, time the kernel, dump live-out
// data (under polybench_prevent_dce) and free the arrays.
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int m = M;
int n = N;
/* Variable declaration/allocation. */
DATA_TYPE alpha;
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, M, M, m, m);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, M, N, m, n);
/* Initialize array(s). */
init_array (m, n, &alpha, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_trmm (m, n, alpha, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(m, n, POLYBENCH_ARRAY(B)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
return 0;
}
|
o10glogon_fmt_plug.c | /*
* This software was written by JimF jfoug AT cox dot net
* in 2016. No copyright is claimed, and the software is hereby
* placed in the public domain. In case this attempt to disclaim
* copyright and place the software in the public domain is deemed
* null and void, then the software is Copyright (c) 2016 JimF
* and it is hereby released to the general public under the following
* terms:
*
* This software may be modified, redistributed, and used for any
* purpose, in source and binary forms, with or without modification.
*
* This is oracle O10g-logon format. NOTE, if the hashes came from a
* Oracle 10g, and the hash data can be sniffed from network traffic
* TNS records.
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_o10glogon;
#elif FMT_REGISTERS_H
john_register_one(&fmt_o10glogon);
#else
#include <string.h>
#include <openssl/des.h>
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "aes.h"
#include "md5.h"
#include "unicode.h"
#include "base64_convert.h"
#include "memdbg.h"
#define FORMAT_LABEL "o10glogon"
#define FORMAT_NAME "Oracle 10g-logon protocol"
#define FORMAT_TAG "$o10glogon$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "DES-AES128-MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 32
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define MAX_USERNAME_LEN 30
#define SALT_SIZE (sizeof(ora10g_salt))
#define SALT_ALIGN (sizeof(unsigned int))
#define CIPHERTEXT_LENGTH 16
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define MAX_HASH_LEN (FORMAT_TAG_LEN+MAX_USERNAME_LEN+1+64+1+64+1+160)
//#define DEBUG_ORACLE
//
// The keys are $o10glogon$oracle-user-name$auth_sess_key$auth_sess_key_c$auth_password
// These can be found in sniffed network traffic.
// Self-test vectors: each entry pairs a full $o10glogon$ hash string
// (user, AUTH_SESSKEY, AUTH_SESSKEY client, AUTH_PASSWORD, all sniffable
// from TNS traffic) with its known plaintext password.
static struct fmt_tests tests[] = {
{"$o10glogon$jimf$6DA8BE6D9713B7F9190DC0F87F1BB1BDFFE44EB1892E40915592980ECCE60AA3$1C08586339E5806DD45CF8E6D83CC6EA2B8CDCDE7CC9F00ADF43DA0F07309090$E2F3D778138213BF01FD743F2092FC976FD60AB2C9F4A1B1D9B08439325421B1", "JimF"},
{"$o10glogon$SESA218390$3B16F14C3DC6048C993000E2BF543BAB489DF7BD8D6061B7274CC9E1DB743E08$1695D5255EDF15CA6B1F14C5CB39C72C98E2CC2B62FB3224ECA5A6A6790511D4$F0F64E384E567F44E9DF8D7F4C029AA59770FA75094F1C26A66C45AFA9913987", "jimf"},
{"$o10glogon$TESTUSER$EEABE812530C6D4432F781DFC14A7C7F81EAE1804F340D3289732477FD351FCC$7B244D7A1DB5ABE553FB9B7325110024911FCBE95EF99E7965A754BC41CF31C0$4C5E28E66B6382117F9D41B08957A3B9E363B42760C33B44CA5D53EA90204ABE", "TESTPASS"},
{NULL}
};
// Parsed salt: username (UTF-16), both 32-byte session keys, and the
// encrypted AUTH_PASSWORD blob with its length.
typedef struct ora10g_salt_t {
int userlen, auth_pass_len;
UTF16 user[MAX_USERNAME_LEN+1];
unsigned char auth_sesskey[32];
unsigned char auth_sesskey_c[32];
unsigned char auth_pass[80];
} ora10g_salt;
// Per-run state, allocated in init() and released in done().
static ora10g_salt *cur_salt;       // salt currently being attacked
static UTF16 (*cur_key)[PLAINTEXT_LENGTH + 1];  // per-index uppercased UTF16-BE keys
static char (*plain_key)[PLAINTEXT_LENGTH + 1]; // per-index original plaintexts
static int *cur_key_len;            // per-index key byte lengths
static int *cracked, any_cracked;   // per-index hit flags + global flag
static DES_key_schedule desschedule1; // key 0x0123456789abcdef
// One-time format setup: prepare the fixed DES key schedule, scale the
// keys-per-crypt limits for OpenMP, and allocate the per-key arrays.
static void init(struct fmt_main *self)
{
DES_set_key((DES_cblock *)"\x01\x23\x45\x67\x89\xab\xcd\xef", &desschedule1);
#ifdef _OPENMP
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
cur_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*cur_key));
plain_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*plain_key));
cur_key_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*cur_key_len));
cracked = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*cracked));
}
// Release everything init() allocated, in reverse order of allocation.
static void done(void)
{
MEM_FREE(cracked);
MEM_FREE(cur_key_len);
MEM_FREE(plain_key);
MEM_FREE(cur_key);
}
// Validate one candidate hash line. Layout checked:
//   $o10glogon$user$<64 hex>$<64 hex>$<hex, multiple of 16 chars>
// Returns 1 when the line is well-formed, 0 otherwise.
static int valid(char *ciphertext, struct fmt_main *self)
{
char *cp;
char tmp[32*5+1];
UTF16 cur_key_mixedcase[MAX_USERNAME_LEN+2];
int len, extra;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
return 0;
ciphertext += FORMAT_TAG_LEN;
cp = strchr(ciphertext, '$');
if (!cp)
return 0;
// make sure username fits in MAX_USERNAME_LEN UTF16
if (cp-ciphertext > sizeof(tmp)-1)
return 0;
memcpy(tmp, ciphertext, cp-ciphertext);
tmp[cp-ciphertext] = 0;
len = enc_to_utf16((UTF16 *)cur_key_mixedcase, MAX_USERNAME_LEN+1, (unsigned char*)tmp, strlen(tmp));
// Negative length means the bytes did not decode in the input encoding.
if (len < 0 || (len == 0 && cp-ciphertext)) {
static int error_shown = 0;
#ifdef HAVE_FUZZ
if (options.flags & (FLG_FUZZ_CHK | FLG_FUZZ_DUMP_CHK))
return 0;
#endif
// Warn once, not per candidate line.
if (!error_shown)
fprintf(stderr, "%s: Input file is not UTF-8. Please use --input-enc to specify a codepage.\n", self->params.label);
error_shown = 1;
return 0;
}
if (len > MAX_USERNAME_LEN)
return 0;
// Field 2: server session key, exactly 64 hex chars.
ciphertext = cp+1;
cp = strchr(ciphertext, '$');
if (!cp || cp-ciphertext != 64 || hexlenu(ciphertext, 0) != 64)
return 0;
// Field 3: client session key, exactly 64 hex chars.
ciphertext = cp+1;
cp = strchr(ciphertext, '$');
if (!cp || cp-ciphertext != 64 || hexlenu(ciphertext, 0) != 64)
return 0;
// Field 4 (last): encrypted password — hex, non-empty, length % 16 == 0.
ciphertext = cp+1;
len = strlen(ciphertext);
cp = strchr(ciphertext, '$');
if (!len || cp || len%16 || hexlenu(ciphertext, &extra) != len || extra)
return 0;
return 1;
}
// Canonicalize a hash line: copy it (bounded) and upper-case everything
// after the tag so equivalent hashes compare equal.
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char out[MAX_HASH_LEN*5+1];
strnzcpy(out, ciphertext, MAX_HASH_LEN+1);
enc_strupper(&out[FORMAT_TAG_LEN]);
return out;
}
// Select the salt that subsequent crypt_all() calls will attack.
static void set_salt(void *salt) {
cur_salt = (ora10g_salt *)salt;
}
// Store a candidate password: keep the original plaintext, then build the
// upper-cased UTF16-BE form Oracle hashes (username+password in UTF16-BE).
static void oracle_set_key(char *key, int index) {
UTF16 cur_key_mixedcase[PLAINTEXT_LENGTH+1];
UTF16 *c;
int key_length;
strcpy(plain_key[index], key);
// Can't use enc_to_utf16_be() because we need to do utf16_uc later
key_length = enc_to_utf16(cur_key_mixedcase, PLAINTEXT_LENGTH, (unsigned char*)key, strlen(key));
if (key_length < 0)
key_length = strlen16(cur_key_mixedcase);
// We convert and uppercase in one shot
key_length = utf16_uc(cur_key[index], PLAINTEXT_LENGTH, cur_key_mixedcase, key_length);
// we have no way to 'undo' here, since the expansion is due to single-2-multi expansion in the upcase,
// and we can not 'fix' our password. We simply have to 'not' properly decrypt this one, but protect ourselves.
if (key_length < 0)
key_length *= -1;
cur_key_len[index] = key_length * sizeof(UTF16);
// Now byte-swap to UTF16-BE
c = cur_key[index];
while((*c = *c << 8 | *c >> 8))
c++;
#ifdef DEBUG_ORACLE
dump_stuff_msg("cur_key ", (unsigned char*)cur_key[index], cur_key_len[index]);
#endif
}
// Return the candidate exactly as the user supplied it.
static char *get_key(int index) {
return plain_key[index];
}
// AES-128-CBC decrypt with an all-zero IV (fresh per call).
static void ORACLE_TNS_Decrypt_AES128_CBC (unsigned char aes_key_bytes[16], unsigned char* input, int input_len, unsigned char* output)
{
unsigned char iv[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
AES_KEY key;
AES_set_decrypt_key(aes_key_bytes, 128, &key);
AES_cbc_encrypt(input, output, input_len, &key, iv, AES_DECRYPT);
}
/*
 * Find the printable-ASCII prefix of a decrypted buffer, verify that the
 * remaining bytes are uniform padding, and NUL-terminate the prefix in
 * place.
 *
 * Returns the prefix length, or -1 when the trailing bytes are not all
 * equal to the first padding byte (malformed — e.g. wrong decryption key).
 *
 * Bug fix: the original scan loop had no upper bound, so a buffer that was
 * printable all the way through was read — and its terminator written —
 * past `len` (out-of-bounds, undefined behavior). The scan is now bounded,
 * and a fully printable buffer is rejected since it cannot contain the
 * required padding byte.
 */
static int terminate_ascii_string (char* ascii_string_not_terminated, int len)
{
	int ascii_len = 0;
	unsigned char padding_byte;
	int pos;

	/* locate the first non-printable byte: start of the padding */
	for (pos = 0; pos < len; pos++) {
		if ((ascii_string_not_terminated[pos] < 32) ||
		    (ascii_string_not_terminated[pos] > 126))
			break;
	}
	if (pos == len)
		return -1;	/* no padding byte present */
	ascii_len = pos;
	padding_byte = ascii_string_not_terminated[pos];
	/* every remaining byte must repeat the padding byte */
	for (; pos < len; pos++) {
		if (ascii_string_not_terminated[pos] != padding_byte)
			return -1;
	}
	ascii_string_not_terminated[ascii_len] = 0;
	return ascii_len;
}
/* XOR the 16-byte server and client key halves together, then MD5 the
 * result to produce the 16-byte combined session key in `output`. */
static void ORACLE_TNS_Combine_SessKeys (unsigned char server_sesskey[16], unsigned char client_sesskey[16], unsigned char* output)
{
	unsigned char mixed[16];
	int k;
	MD5_CTX md5_ctx;

	for (k = 0; k < 16; k++)
		mixed[k] = server_sesskey[k] ^ client_sesskey[k];
	MD5_Init (&md5_ctx);
	MD5_Update (&md5_ctx, mixed, 16);
	MD5_Final (output, &md5_ctx);
}
// Decrypt the sniffed AUTH_PASSWORD using the candidate's Oracle hash as
// AES key material. Returns the password length, or -1 when the padding
// check fails (wrong candidate). On success the plaintext (which starts
// 16 bytes into the decrypted blob) is copied into `decrypted`.
// NOTE(review): strncpy here copies exactly `passlen` bytes and does not
// NUL-terminate `decrypted`; the caller compares with strncmp, so confirm
// no caller treats `decrypted` as a C string.
static int ORACLE_TNS_Decrypt_Password_10g (unsigned char OracleHash[8], unsigned char *auth_sesskey, unsigned char *auth_sesskey_c, unsigned char *auth_password, int auth_passwordlen, unsigned char *decrypted)
{
int passlen = 0;
unsigned char aes_key_bytes[32];
unsigned char decrypted_server_sesskey[32];
unsigned char decrypted_client_sesskey[32];
unsigned char combined_sesskeys[16];
char decrypted_password[64];
// AES key = 8-byte Oracle hash, zero-padded.
memset (aes_key_bytes,0,sizeof(aes_key_bytes));
memcpy (aes_key_bytes,OracleHash,8);
// Decrypt server and client session keys
ORACLE_TNS_Decrypt_AES128_CBC (aes_key_bytes, auth_sesskey, 32, decrypted_server_sesskey);
ORACLE_TNS_Decrypt_AES128_CBC (aes_key_bytes, auth_sesskey_c, 32, decrypted_client_sesskey);
// Combine server and client session keys
ORACLE_TNS_Combine_SessKeys (&decrypted_server_sesskey[16], &decrypted_client_sesskey[16], combined_sesskeys);
// Decrypt auth password with combined session key
ORACLE_TNS_Decrypt_AES128_CBC (combined_sesskeys, auth_password, auth_passwordlen, (unsigned char*) decrypted_password);
// terminate decrypted password with NULL
passlen = terminate_ascii_string (&decrypted_password[16], auth_passwordlen-16);
if (passlen != -1)
strncpy ((char*)decrypted, &decrypted_password[16], passlen);
return passlen;
}
// For each queued candidate: derive the Oracle 10g hash (double DES-CBC
// over UTF16-BE username+password, keyed first by the fixed schedule and
// then by the first pass's final IV), use it to decrypt the sniffed
// password, and flag a hit when the decryption matches the plaintext.
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int idx = 0;
// Clear hit flags from the previous batch.
if (any_cracked) {
memset(cracked, 0, sizeof(*cracked) * count);
any_cracked = 0;
}
#ifdef DEBUG_ORACLE
// NOTE(review): `buf` and `key_length` are not in scope here — this dump
// will not compile when DEBUG_ORACLE is defined; verify before enabling.
dump_stuff_msg("cur_salt ", buf, cur_salt->userlen+key_length);
#endif
#ifdef _OPENMP
#pragma omp parallel for
for (idx = 0; idx < count; idx++)
#endif
{
unsigned char buf[256], buf1[256];
unsigned int l;
uint32_t iv[2];
DES_key_schedule desschedule2;
// buf = UTF16-BE username || UTF16-BE uppercased password.
l = cur_salt->userlen + cur_key_len[idx];
memcpy(buf, cur_salt->user, cur_salt->userlen);
memcpy(buf + cur_salt->userlen, cur_key[idx], cur_key_len[idx]);
// Pass 1: CBC-MAC-style encrypt; the final IV becomes the second key.
iv[0] = iv[1] = 0;
DES_ncbc_encrypt((unsigned char *)buf, buf1, l, &desschedule1, (DES_cblock *) iv, DES_ENCRYPT);
DES_set_key((DES_cblock *)iv, &desschedule2);
// Pass 2: same data under the derived key; final IV is the Oracle hash.
iv[0] = iv[1] = 0;
DES_ncbc_encrypt((unsigned char *)buf, buf1, l, &desschedule2, (DES_cblock *) iv, DES_ENCRYPT);
#ifdef DEBUG_ORACLE
dump_stuff_msg("  iv (the hash key) ", (unsigned char*)&iv[0], 8);
#endif
ORACLE_TNS_Decrypt_Password_10g ((unsigned char*)iv, cur_salt->auth_sesskey, cur_salt->auth_sesskey_c, cur_salt->auth_pass, cur_salt->auth_pass_len, buf);
if (!strncmp((char*)buf, plain_key[idx], strlen(plain_key[idx])))
{
cracked[idx] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
any_cracked |= 1;
}
}
return count;
}
// Parse a canonical hash line into a static ora10g_salt. Assumes the line
// already passed valid(), so every '$' separator and hex field is present.
static void *get_salt(char *ciphertext)
{
static ora10g_salt salt;
UTF8 tmp[MAX_USERNAME_LEN*5+1];
char *cp;
memset(&salt, 0, sizeof(salt));
ciphertext += FORMAT_TAG_LEN;
cp = strchr(ciphertext, '$');
// Username -> UTF16-BE; userlen is kept in bytes (hence the *2).
strncpy((char*)tmp, ciphertext, cp-ciphertext);
tmp[cp-ciphertext] = 0;
salt.userlen = enc_to_utf16_be(salt.user, MAX_USERNAME_LEN, tmp, cp-ciphertext);
if (salt.userlen < 0)
salt.userlen = strlen16(salt.user);
salt.userlen *= 2;
// Two fixed 64-hex-char session keys, then the variable-length password.
base64_convert(cp+1,e_b64_hex,64,salt.auth_sesskey,e_b64_raw,32,0,0);
cp = strchr(cp+1, '$');
base64_convert(cp+1,e_b64_hex,64,salt.auth_sesskey_c,e_b64_raw,32,0,0);
cp = strchr(cp+1, '$') + 1;
salt.auth_pass_len = strlen(cp)/2;
base64_convert(cp,e_b64_hex,salt.auth_pass_len*2,salt.auth_pass,e_b64_raw,salt.auth_pass_len,0,0);
return &salt;
}
// Public domain hash function by DJ Bernstein (salt is a username)
static int salt_hash(void *salt)
{
	unsigned int h = 5381;
	const UTF16 *s = ((UTF16 *)salt) + 1;

	for (; *s; s++)
		h = ((h << 5) + h) ^ *s;
	return h & (SALT_HASH_SIZE - 1);
}
// This format has no stored binary; comparisons consult the cracked[]
// flags that crypt_all() filled in.
static int cmp_all(void *binary, int count)
{
return any_cracked;
}
static int cmp_one(void *binary, int count)
{
return cracked[count];
}
// crypt_all() already verified the full plaintext, nothing more to check.
static int cmp_exact(char *source, int index)
{
return 1;
}
// Format registration: parameters block, then the method table.
struct fmt_main fmt_o10glogon = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_SPLIT_UNIFIES_CASE | FMT_CASE | FMT_OMP,
{ NULL },
{ FORMAT_TAG },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
fmt_default_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
salt_hash,
NULL,
set_salt,
oracle_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
GB_binop__pow_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__pow_int64
// A.*B function (eWiseMult): GB_AemultB__pow_int64
// A*D function (colscale): (none)
// D*A function (rowscale): (node)
// C+=B function (dense accum): GB_Cdense_accumB__pow_int64
// C+=b function (dense accum): GB_Cdense_accumb__pow_int64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pow_int64
// C=scalar+B GB_bind1st__pow_int64
// C=scalar+B' GB_bind1st_tran__pow_int64
// C=A+scalar GB_bind2nd__pow_int64
// C=A'+scalar GB_bind2nd_tran__pow_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = GB_pow_int64 (aij, bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_pow_int64 (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_INT64 || GxB_NO_POW_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the loop body comes from the
// included template, specialized by the GB_* macros above. Returns
// GrB_NO_VALUE when this operator/type combination is compiled out.
GrB_Info GB_Cdense_ewise3_noaccum__pow_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, using the
// precomputed ek-slice task partition (kfirst/klast/pstart arrays).
GrB_Info GB_Cdense_accumB__pow_int64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// C(i,j) = pow (C(i,j), b) for all entries of the dense matrix C, where b is
// a scalar passed through the opaque pointer p_bwork (known here to hold an
// int64_t).
GrB_Info GB_Cdense_accumb__pow_int64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // The inner block above returns on its own; the generator emits this
    // second return so every code path is explicitly terminated.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled kernel: column scaling is not generated for this operator, so the
// generator emits the "(none)" placeholder and compiles the definition out.
GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled kernel: row scaling is not generated for this operator.  The
// placeholder name is "(none)", matching every other disabled kernel in this
// file (previously misspelled "(node)"; harmless under #if 0, but fixed for
// consistency with the generator's convention).
GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Free the ek_slice workspaces on every exit path of the add/emult kernels.
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}

// C = A+B or C<M> = A+B with z = pow (x,y) on int64_t entries.  TaskList
// partitions the work over C_ntasks tasks; the C_to_M/C_to_A/C_to_B maps
// translate C's vectors to those of M, A, and B (used inside the template).
GrB_Info GB_AaddB__pow_int64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slicing workspaces for M, A, and B; released by GB_FREE_ALL
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

// C = A.*B or C<M> = A.*B with z = pow (x,y) applied at entries present in
// both A and B.  Same task/slice conventions as the eWiseAdd kernel above;
// GB_FREE_ALL (defined earlier in this file) releases the slice workspaces.
GrB_Info GB_AemultB__pow_int64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cz [k] = pow (xscalar, Bz [k]) for each of the anz entries of B.  Bb is
// B's bitmap (NULL when B is full); entries absent from the bitmap are
// skipped.  The output and input arrays may alias each other.
GrB_Info GB_bind1st__pow_int64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    int64_t *Cz = (int64_t *) Cx_output ;
    int64_t *Bz = (int64_t *) Bx_input ;
    int64_t xscalar = (*((int64_t *) x_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip positions with no entry in B's bitmap
        if (!GBB (Bb, k)) continue ;
        Cz [k] = GB_pow_int64 (xscalar, Bz [k]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cz [k] = pow (Az [k], yscalar) for each of the anz entries of A.  Ab is
// A's bitmap (NULL when A is full); entries absent from the bitmap are
// skipped.  The output and input arrays may alias each other.
GrB_Info GB_bind2nd__pow_int64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    int64_t *Cz = (int64_t *) Cx_output ;
    int64_t *Az = (int64_t *) Ax_input ;
    int64_t yscalar = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip positions with no entry in A's bitmap
        if (!GBB (Ab, k)) continue ;
        Cz [k] = GB_pow_int64 (Az [k], yscalar) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int64_t aij = Ax [pA] ; \
    Cx [pC] = GB_pow_int64 (x, aij) ; \
}

// C = pow (x, A'): transpose A while applying the operator with the scalar x
// bound to the first argument.  Workspaces and A_slice partition A for the
// parallel transpose (consumed inside GB_unop_transpose.c).
GrB_Info GB_bind1st_tran__pow_int64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this kernel
    #undef GB_ATYPE
    #define GB_ATYPE \
    int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int64_t aij = Ax [pA] ; \
    Cx [pC] = GB_pow_int64 (aij, y) ; \
}

// C = pow (A', y): transpose A while applying the operator with the scalar y
// bound to the second argument.  Workspaces and A_slice partition A for the
// parallel transpose (consumed inside GB_unop_transpose.c).
GrB_Info GB_bind2nd_tran__pow_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
fx.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/fx-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Typedef declarations.
*/
/*
  AcquireFxInfo() rewrites every multi-character operator in the expression
  string into one of these single-byte codes, so the parser can treat each
  operator as a single character.  The values start at 0xd9 to stay out of
  the printable ASCII range used by expression text itself.
*/
typedef enum
{
  BitwiseAndAssignmentOperator = 0xd9U,  /* &=  */
  BitwiseOrAssignmentOperator,           /* |=  */
  LeftShiftAssignmentOperator,           /* <<= */
  RightShiftAssignmentOperator,          /* >>= */
  PowerAssignmentOperator,               /* ^=  */
  ModuloAssignmentOperator,              /* %=  */
  PlusAssignmentOperator,                /* +=  */
  SubtractAssignmentOperator,            /* -=  */
  MultiplyAssignmentOperator,            /* *=  */
  DivideAssignmentOperator,              /* /=  */
  IncrementAssignmentOperator,           /* ++  */
  DecrementAssignmentOperator,           /* --  */
  LeftShiftOperator,                     /* <<  */
  RightShiftOperator,                    /* >>  */
  LessThanEqualOperator,                 /* <=  */
  GreaterThanEqualOperator,              /* >=  */
  EqualOperator,                         /* ==  */
  NotEqualOperator,                      /* !=  */
  LogicalAndOperator,                    /* &&  */
  LogicalOrOperator,                     /* ||  */
  ExponentialNotation                    /* **  */
} FxOperator;
/*
  Evaluation state shared by the Fx expression methods.
*/
struct _FxInfo
{
  const Image
    *images;        /* image list the expression is evaluated against */

  char
    *expression;    /* expression text, compound operators collapsed */

  FILE
    *file;          /* log stream (set to stderr by AcquireFxInfo) */

  SplayTreeInfo
    *colors,        /* cache: color name -> PixelInfo */
    *symbols;       /* symbol table: name -> double value */

  CacheView
    **view;         /* one virtual cache view per image in the list */

  RandomInfo
    *random_info;   /* random number source */

  ExceptionInfo
    *exception;     /* exceptions raised during evaluation */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireFxInfo() allocates the FxInfo structure.
%
% The format of the AcquireFxInfo method is:
%
% FxInfo *AcquireFxInfo(Image *images,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o expression: the expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression,
  ExceptionInfo *exception)
{
  const Image
    *next;

  FxInfo
    *fx_info;

  ssize_t
    i;

  unsigned char
    fx_op[2];

  fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info));
  (void) memset(fx_info,0,sizeof(*fx_info));
  fx_info->exception=AcquireExceptionInfo();
  fx_info->images=images;
  fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  /*
    One virtual cache view per image in the list.
  */
  fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
    fx_info->images),sizeof(*fx_info->view));
  if (fx_info->view == (CacheView **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  i=0;
  next=GetFirstImageInList(fx_info->images);
  for ( ; next != (Image *) NULL; next=next->next)
  {
    fx_info->view[i]=AcquireVirtualCacheView(next,exception);
    i++;
  }
  fx_info->random_info=AcquireRandomInfo();
  fx_info->expression=ConstantString(expression);
  fx_info->file=stderr;
  /*
    Convert compound to simple operators: each multi-character operator is
    replaced by its single-byte FxOperator code (fx_op is a 1-character
    string).  Longer operators are substituted first so e.g. "<<=" is not
    consumed by the "<<" rule.
  */
  fx_op[1]='\0';
  *fx_op=(unsigned char) BitwiseAndAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"&=",(char *) fx_op);
  *fx_op=(unsigned char) BitwiseOrAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"|=",(char *) fx_op);
  *fx_op=(unsigned char) LeftShiftAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"<<=",(char *) fx_op);
  *fx_op=(unsigned char) RightShiftAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,">>=",(char *) fx_op);
  *fx_op=(unsigned char) PowerAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"^=",(char *) fx_op);
  *fx_op=(unsigned char) ModuloAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"%=",(char *) fx_op);
  *fx_op=(unsigned char) PlusAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"+=",(char *) fx_op);
  *fx_op=(unsigned char) SubtractAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"-=",(char *) fx_op);
  *fx_op=(unsigned char) MultiplyAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"*=",(char *) fx_op);
  *fx_op=(unsigned char) DivideAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"/=",(char *) fx_op);
  *fx_op=(unsigned char) IncrementAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"++",(char *) fx_op);
  *fx_op=(unsigned char) DecrementAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"--",(char *) fx_op);
  *fx_op=(unsigned char) LeftShiftOperator;
  (void) SubstituteString(&fx_info->expression,"<<",(char *) fx_op);
  *fx_op=(unsigned char) RightShiftOperator;
  (void) SubstituteString(&fx_info->expression,">>",(char *) fx_op);
  *fx_op=(unsigned char) LessThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,"<=",(char *) fx_op);
  *fx_op=(unsigned char) GreaterThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,">=",(char *) fx_op);
  *fx_op=(unsigned char) EqualOperator;
  (void) SubstituteString(&fx_info->expression,"==",(char *) fx_op);
  *fx_op=(unsigned char) NotEqualOperator;
  (void) SubstituteString(&fx_info->expression,"!=",(char *) fx_op);
  *fx_op=(unsigned char) LogicalAndOperator;
  (void) SubstituteString(&fx_info->expression,"&&",(char *) fx_op);
  *fx_op=(unsigned char) LogicalOrOperator;
  (void) SubstituteString(&fx_info->expression,"||",(char *) fx_op);
  *fx_op=(unsigned char) ExponentialNotation;
  (void) SubstituteString(&fx_info->expression,"**",(char *) fx_op);
  /*
    Force right-to-left associativity for unary negation, then undo the
    rewrite where "-" was actually part of an exponent ("^-", "E-", "e-").
  */
  (void) SubstituteString(&fx_info->expression,"-","-1.0*");
  (void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
  (void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
  (void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
  (void) SubstituteString(&fx_info->expression," ","");  /* compact string */
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
% The format of the DestroyFxInfo method is:
%
% ImageInfo *DestroyFxInfo(ImageInfo *fx_info)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
*/
/*
  DestroyFxInfo() releases every resource held by an FxInfo structure, in
  reverse order of acquisition, and returns NULL.
*/
MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
  ssize_t
    i;

  fx_info->exception=DestroyExceptionInfo(fx_info->exception);
  fx_info->expression=DestroyString(fx_info->expression);
  fx_info->symbols=DestroySplayTree(fx_info->symbols);
  fx_info->colors=DestroySplayTree(fx_info->colors);
  /* destroy the cache views from the last image down to the first */
  i=(ssize_t) GetImageListLength(fx_info->images);
  while (--i >= 0)
    fx_info->view[i]=DestroyCacheView(fx_info->view[i]);
  fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
  fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
  fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F x E v a l u a t e C h a n n e l E x p r e s s i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxEvaluateChannelExpression() evaluates an expression and returns the
% results.
%
% The format of the FxEvaluateExpression method is:
%
% double FxEvaluateChannelExpression(FxInfo *fx_info,
% const PixelChannel channel,const ssize_t x,const ssize_t y,
% double *alpha,Exceptioninfo *exception)
% double FxEvaluateExpression(FxInfo *fx_info,
% double *alpha,Exceptioninfo *exception)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
% o channel: the channel.
%
% o x,y: the pixel position.
%
% o alpha: the result.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetFxSymbolValue() returns the cached value for a symbol, or NULL when the
  symbol is not yet present in the symbol table.
*/
static inline const double *GetFxSymbolValue(FxInfo *magick_restrict fx_info,
  const char *symbol)
{
  void
    *node;

  node=GetValueFromSplayTree(fx_info->symbols,symbol);
  return((const double *) node);
}
/*
  SetFxSymbolValue() records value under symbol in the symbol table; the
  existing node is updated in place when the symbol is already present.
  Returns MagickFalse (after raising an exception) on allocation failure.
*/
static inline MagickBooleanType SetFxSymbolValue(
  FxInfo *magick_restrict fx_info,const char *magick_restrict symbol,
  double const value)
{
  double
    *object;

  /* fast path: symbol already cached, overwrite in place */
  object=(double *) GetValueFromSplayTree(fx_info->symbols,symbol);
  if (object != (double *) NULL)
    {
      *object=value;
      return(MagickTrue);
    }
  object=(double *) AcquireMagickMemory(sizeof(*object));
  if (object == (double *) NULL)
    {
      (void) ThrowMagickException(fx_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        fx_info->images->filename);
      return(MagickFalse);
    }
  *object=value;
  /* the tree owns the key copy and the value (relinquish handlers were
     registered when the tree was created) */
  return(AddValueToSplayTree(fx_info->symbols,ConstantString(symbol),object));
}
/*
  FxChannelStatistics() returns the requested per-channel statistic (depth,
  kurtosis, maxima, mean, median, minima, skewness, standard_deviation),
  scaled by QuantumScale.  Results are cached in the symbol table under a
  key combining the image address, channel, and symbol, so each statistic is
  computed at most once per image/channel.
*/
static double FxChannelStatistics(FxInfo *fx_info,Image *image,
  PixelChannel channel,const char *symbol,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;

  char
    key[MagickPathExtent];

  const double
    *value;

  double
    statistic;

  const char
    *p;

  channel_mask=UndefinedChannel;
  /* a ".channel" suffix (e.g. "mean.g") selects an explicit channel */
  for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
  if (*p == '.')
    {
      ssize_t
        option;

      option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1);
      if (option >= 0)
        {
          channel=(PixelChannel) option;
          channel_mask=SetPixelChannelMask(image,(ChannelType)
            (1UL << channel));
        }
    }
  (void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image,
    (double) channel,symbol);
  value=GetFxSymbolValue(fx_info,key);
  if (value != (const double *) NULL)
    {
      /* cache hit: restore the saved channel mask and return */
      if (channel_mask != UndefinedChannel)
        (void) SetPixelChannelMask(image,channel_mask);
      return(QuantumScale*(*value));
    }
  statistic=0.0;
  if (LocaleNCompare(symbol,"depth",5) == 0)
    {
      size_t
        depth;

      depth=GetImageDepth(image,exception);
      statistic=(double) depth;
    }
  if (LocaleNCompare(symbol,"kurtosis",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
      statistic=kurtosis;
    }
  if (LocaleNCompare(symbol,"maxima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageRange(image,&minima,&maxima,exception);
      statistic=maxima;
    }
  if (LocaleNCompare(symbol,"mean",4) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageMean(image,&mean,&standard_deviation,exception);
      statistic=mean;
    }
  if (LocaleNCompare(symbol,"median",6) == 0)
    {
      double
        median;

      (void) GetImageMedian(image,&median,exception);
      statistic=median;
    }
  if (LocaleNCompare(symbol,"minima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageRange(image,&minima,&maxima,exception);
      statistic=minima;
    }
  if (LocaleNCompare(symbol,"skewness",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
      statistic=skewness;
    }
  if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageMean(image,&mean,&standard_deviation,exception);
      statistic=standard_deviation;
    }
  if (channel_mask != UndefinedChannel)
    (void) SetPixelChannelMask(image,channel_mask);
  if (SetFxSymbolValue(fx_info,key,statistic) == MagickFalse)
    return(0.0);
  return(QuantumScale*statistic);
}
static double
FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t,
const ssize_t,const char *,const size_t,double *,ExceptionInfo *);
/*
  IsFxFunction() returns MagickTrue when expression begins with the function
  name and the character that follows the name is not whitespace (typically
  '(', e.g. "atan2(").  MagickFalse is returned when the expression is
  shorter than the name or does not match it.
*/
static inline MagickBooleanType IsFxFunction(const char *expression,
  const char *name,const size_t length)
{
  int
    c;

  size_t
    i;

  /* expression must have at least length+1 characters */
  for (i=0; i <= length; i++)
    if (expression[i] == '\0')
      return(MagickFalse);
  if (LocaleNCompare(expression,name,length) != 0)
    return(MagickFalse);
  c=expression[length];
  if ((c == '(') || (isspace((int) ((unsigned char) c)) == 0))
    return(MagickTrue);
  return(MagickFalse);
}
static inline double FxGCD(const double alpha,const double beta)
{
if (alpha < beta)
return(FxGCD(beta,alpha));
if (fabs(beta) < 0.001)
return(alpha);
return(FxGCD(beta,alpha-beta*floor(alpha/beta)));
}
/*
  FxSubexpression() scans forward to the ')' that closes the current
  parenthesized subexpression and returns a pointer to it.  If the string
  ends before the parenthesis is balanced, an UnbalancedParenthesis
  exception is raised and the pointer to the terminating '\0' is returned.
*/
static inline const char *FxSubexpression(const char *expression,
  ExceptionInfo *exception)
{
  const char
    *p;

  ssize_t
    level;

  level=0;
  p=expression;
  while (*p != '\0')
  {
    if ((level == 1) && (*p == ')'))
      break;
    if (*p == '(')
      level++;
    else
      if (*p == ')')
        level--;
    p++;
  }
  if (*p == '\0')
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "UnbalancedParenthesis","`%s'",expression);
  return(p);
}
/*
  FxGetSymbol() resolves one symbol of an fx expression to a double: an
  image selector with optional index (s, u, v, u[n]), an optional pixel
  reference (p{x,y} absolute, p[dx,dy] relative), then a channel value,
  image attribute, per-channel statistic, cached variable, or image
  artifact.  Unknown symbols raise UndefinedVariable and evaluate to 0.0.
*/
static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel,
  const ssize_t x,const ssize_t y,const char *expression,const size_t depth,
  ExceptionInfo *exception)
{
  char
    *q,
    symbol[MagickPathExtent];

  const char
    *artifact,
    *p;

  const double
    *value;

  double
    alpha,
    beta;

  Image
    *image;

  MagickBooleanType
    status;

  PixelInfo
    pixel;

  PointInfo
    point;

  ssize_t
    i;

  size_t
    level;

  p=expression;
  i=GetImageIndexInList(fx_info->images);
  level=0;
  point.x=(double) x;
  point.y=(double) y;
  /*
    A single-letter prefix (second character not alphabetic) may select an
    image and/or a pixel location before the symbol proper.
  */
  if (isalpha((int) ((unsigned char) *(p+1))) == 0)
    {
      char
        *subexpression;

      subexpression=AcquireString(expression);
      if (strchr("suv",(int) *p) != (char *) NULL)
        {
          /*
            s: current image, u: first image, v: second image; u[expr]
            selects an arbitrary image index.
          */
          switch (*p)
          {
            case 's':
            default:
            {
              i=GetImageIndexInList(fx_info->images);
              break;
            }
            case 'u': i=0; break;
            case 'v': i=1; break;
          }
          p++;
          if (*p == '[')
            {
              /* copy the bracketed index expression and evaluate it */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '[')
                  level++;
                else
                  if (*p == ']')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                depth,&beta,exception);
              i=(ssize_t) alpha;
              if (*p != '\0')
                p++;
            }
          if (*p == '.')
            p++;
        }
      if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0))
        {
          p++;
          if (*p == '{')
            {
              /* p{x,y}: absolute pixel coordinates */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '{')
                  level++;
                else
                  if (*p == '}')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                depth,&beta,exception);
              point.x=alpha;
              point.y=beta;
              if (*p != '\0')
                p++;
            }
          else
            if (*p == '[')
              {
                /* p[dx,dy]: offset relative to the current pixel */
                level++;
                q=subexpression;
                for (p++; *p != '\0'; )
                {
                  if (*p == '[')
                    level++;
                  else
                    if (*p == ']')
                      {
                        level--;
                        if (level == 0)
                          break;
                      }
                  *q++=(*p++);
                }
                *q='\0';
                alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                  depth,&beta,exception);
                point.x+=alpha;
                point.y+=beta;
                if (*p != '\0')
                  p++;
              }
          if (*p == '.')
            p++;
        }
      subexpression=DestroyString(subexpression);
    }
  image=GetImageFromList(fx_info->images,i);
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "NoSuchImage","`%s'",expression);
      return(0.0);
    }
  i=GetImageIndexInList(image);
  GetPixelInfo(image,&pixel);
  status=InterpolatePixelInfo(image,fx_info->view[i],image->interpolate,
    point.x,point.y,&pixel,exception);
  (void) status;
  /*
    Symbols of 3+ characters that are not well-known channel keywords may be
    color names (e.g. "red"); resolve and cache them in fx_info->colors.
  */
  if ((*p != '\0') && (*(p+1) != '\0') && (*(p+2) != '\0') &&
      (LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luma") != 0) &&
      (LocaleCompare(p,"luminance") != 0) && (LocaleCompare(p,"hue") != 0) &&
      (LocaleCompare(p,"saturation") != 0) &&
      (LocaleCompare(p,"lightness") != 0))
    {
      char
        name[MagickPathExtent];

      size_t
        length;

      (void) CopyMagickString(name,p,MagickPathExtent);
      length=strlen(name);
      /* trim a trailing ".channel" suffix, but stop at ')' */
      for (q=name+length-1; q > name; q--)
      {
        if (*q == ')')
          break;
        if (*q == '.')
          {
            *q='\0';
            break;
          }
      }
      q=name;
      if ((*q != '\0') && (*(q+1) != '\0') && (*(q+2) != '\0') &&
          (GetFxSymbolValue(fx_info,name) == (const double *) NULL))
        {
          PixelInfo
            *color;

          color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name);
          if (color != (PixelInfo *) NULL)
            {
              pixel=(*color);
              p+=length;
            }
          else
            {
              MagickBooleanType
                status;

              status=QueryColorCompliance(name,AllCompliance,&pixel,
                fx_info->exception);
              if (status != MagickFalse)
                {
                  (void) AddValueToSplayTree(fx_info->colors,
                    ConstantString(name),ClonePixelInfo(&pixel));
                  p+=length;
                }
            }
        }
    }
  (void) CopyMagickString(symbol,p,MagickPathExtent);
  StripString(symbol);
  /*
    An empty symbol (bare pixel reference like "p[-1,0]") yields the value
    of the channel currently being evaluated.
  */
  if (*symbol == '\0')
    {
      switch (channel)
      {
        case RedPixelChannel: return(QuantumScale*pixel.red);
        case GreenPixelChannel: return(QuantumScale*pixel.green);
        case BluePixelChannel: return(QuantumScale*pixel.blue);
        case BlackPixelChannel:
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                ImageError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
        case AlphaPixelChannel:
        {
          if (pixel.alpha_trait == UndefinedPixelTrait)
            return(1.0);
          alpha=(double) (QuantumScale*pixel.alpha);
          return(alpha);
        }
        case CompositePixelChannel:
        {
          Quantum
            quantum_pixel[MaxPixelChannels];

          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
        case IndexPixelChannel:
          return(0.0);
        default:
          break;
      }
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",p);
      return(0.0);
    }
  /*
    Named symbols: channel letters, attributes, and statistics, dispatched
    on the first character.
  */
  switch (*symbol)
  {
    case 'A':
    case 'a':
    {
      if (LocaleCompare(symbol,"a") == 0)
        return((QuantumScale*pixel.alpha));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(symbol,"b") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'C':
    case 'c':
    {
      if (IsFxFunction(symbol,"channel",7) != MagickFalse)
        {
          GeometryInfo
            channel_info;

          MagickStatusType
            flags;

          /* channel(r,g,b,k,a): pick the geometry field matching channel */
          flags=ParseGeometry(symbol+7,&channel_info);
          if (image->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanPixelChannel:
              {
                if ((flags & RhoValue) == 0)
                  return(0.0);
                return(channel_info.rho);
              }
              case MagentaPixelChannel:
              {
                if ((flags & SigmaValue) == 0)
                  return(0.0);
                return(channel_info.sigma);
              }
              case YellowPixelChannel:
              {
                if ((flags & XiValue) == 0)
                  return(0.0);
                return(channel_info.xi);
              }
              case BlackPixelChannel:
              {
                if ((flags & PsiValue) == 0)
                  return(0.0);
                return(channel_info.psi);
              }
              case AlphaPixelChannel:
              {
                if ((flags & ChiValue) == 0)
                  return(0.0);
                return(channel_info.chi);
              }
              default:
                return(0.0);
            }
          switch (channel)
          {
            case RedPixelChannel:
            {
              if ((flags & RhoValue) == 0)
                return(0.0);
              return(channel_info.rho);
            }
            case GreenPixelChannel:
            {
              if ((flags & SigmaValue) == 0)
                return(0.0);
              return(channel_info.sigma);
            }
            case BluePixelChannel:
            {
              if ((flags & XiValue) == 0)
                return(0.0);
              return(channel_info.xi);
            }
            case BlackPixelChannel:
            {
              if ((flags & ChiValue) == 0)
                return(0.0);
              return(channel_info.chi);
            }
            case AlphaPixelChannel:
            {
              if ((flags & PsiValue) == 0)
                return(0.0);
              return(channel_info.psi);
            }
            default:
              return(0.0);
          }
        }
      if (LocaleCompare(symbol,"c") == 0)
        return(QuantumScale*pixel.red);  /* cyan shares the red component */
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(symbol,"depth",5) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'E':
    case 'e':
    {
      if (LocaleCompare(symbol,"extent") == 0)
        {
          if (image->extent != 0)
            return((double) image->extent);
          return((double) GetBlobSize(image));
        }
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleCompare(symbol,"g") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleNCompare(symbol,"kurtosis",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"k") == 0)
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(symbol,"h") == 0)
        return((double) image->rows);
      if (LocaleCompare(symbol,"hue") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(hue);
        }
      break;
    }
    case 'I':
    case 'i':
    {
      if ((LocaleCompare(symbol,"image.depth") == 0) ||
          (LocaleCompare(symbol,"image.minima") == 0) ||
          (LocaleCompare(symbol,"image.maxima") == 0) ||
          (LocaleCompare(symbol,"image.mean") == 0) ||
          (LocaleCompare(symbol,"image.kurtosis") == 0) ||
          (LocaleCompare(symbol,"image.skewness") == 0) ||
          (LocaleCompare(symbol,"image.standard_deviation") == 0))
        return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
      if (LocaleCompare(symbol,"image.resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"image.resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"intensity") == 0)
        {
          Quantum
            quantum_pixel[MaxPixelChannels];

          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
      if (LocaleCompare(symbol,"i") == 0)
        return((double) x);
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(symbol,"j") == 0)
        return((double) y);
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleCompare(symbol,"lightness") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(lightness);
        }
      if (LocaleCompare(symbol,"luma") == 0)
        {
          double
            luma;

          luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luma);
        }
      if (LocaleCompare(symbol,"luminance") == 0)
        {
          double
            luminence;

          luminence=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luminence);
        }
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleNCompare(symbol,"maxima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"mean",4) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"median",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"minima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"m") == 0)
        return(QuantumScale*pixel.green);  /* magenta shares green */
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleCompare(symbol,"n") == 0)
        return((double) GetImageListLength(fx_info->images));
      break;
    }
    case 'O':
    case 'o':
    {
      if (LocaleCompare(symbol,"o") == 0)
        return(QuantumScale*pixel.alpha);
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(symbol,"page.height") == 0)
        return((double) image->page.height);
      if (LocaleCompare(symbol,"page.width") == 0)
        return((double) image->page.width);
      if (LocaleCompare(symbol,"page.x") == 0)
        return((double) image->page.x);
      if (LocaleCompare(symbol,"page.y") == 0)
        return((double) image->page.y);
      if (LocaleCompare(symbol,"printsize.x") == 0)
        return(PerceptibleReciprocal(image->resolution.x)*image->columns);
      if (LocaleCompare(symbol,"printsize.y") == 0)
        return(PerceptibleReciprocal(image->resolution.y)*image->rows);
      break;
    }
    case 'Q':
    case 'q':
    {
      if (LocaleCompare(symbol,"quality") == 0)
        return((double) image->quality);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleCompare(symbol,"resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"r") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(symbol,"saturation") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(saturation);
        }
      if (LocaleNCompare(symbol,"skewness",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleCompare(symbol,"t") == 0)
        return((double) GetImageIndexInList(fx_info->images));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleCompare(symbol,"w") == 0)
        return((double) image->columns);
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(symbol,"y") == 0)
        return(QuantumScale*pixel.blue);  /* yellow shares blue */
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(symbol,"z") == 0)
        return((double) GetImageDepth(image,fx_info->exception));
      break;
    }
    default:
      break;
  }
  /* user-defined variable, then image artifact, then UndefinedVariable */
  value=GetFxSymbolValue(fx_info,symbol);
  if (value != (const double *) NULL)
    return(*value);
  artifact=GetImageArtifact(image,symbol);
  if (artifact != (const char *) NULL)
    return(StringToDouble(artifact,(char **) NULL));
  (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
    "UndefinedVariable","`%s'",symbol);
  (void) SetFxSymbolValue(fx_info,symbol,0.0);
  return(0.0);
}
/*
  FxOperatorPrecedence() scans `expression` once, left to right, and returns
  a pointer to the operator at which the expression should be split by the
  recursive evaluator: the operator with the loosest binding found at nesting
  level zero.  Returns NULL when no such operator exists (the expression is
  atomic -- a literal, a symbol, or a single function call).

  The enum below is ordered from tightest-binding (small value) to
  loosest-binding (large value); the scan keeps the loosest operator seen so
  far in `target` and its location in `subexpression`.
*/
static const char *FxOperatorPrecedence(const char *expression,
  ExceptionInfo *exception)
{
  typedef enum
  {
    UndefinedPrecedence,
    NullPrecedence,
    BitwiseComplementPrecedence,
    ExponentPrecedence,
    ExponentialNotationPrecedence,
    MultiplyPrecedence,
    AdditionPrecedence,
    ShiftPrecedence,
    RelationalPrecedence,
    EquivalencyPrecedence,
    BitwiseAndPrecedence,
    BitwiseOrPrecedence,
    LogicalAndPrecedence,
    LogicalOrPrecedence,
    TernaryPrecedence,
    AssignmentPrecedence,
    CommaPrecedence,
    SeparatorPrecedence
  } FxPrecedence;

  FxPrecedence
    precedence,
    target;      /* loosest precedence encountered so far */

  const char
    *subexpression;  /* location of the operator matching `target` */

  int
    c;           /* previous non-space character; -1 before the first one */

  size_t
    level;       /* current '{'/'[' nesting depth; operators only count at 0 */

  c=(-1);
  level=0;
  subexpression=(const char *) NULL;
  target=NullPrecedence;
  while ((c != '\0') && (*expression != '\0'))
  {
    precedence=UndefinedPrecedence;
    /*
      Skip whitespace; also skip the character following '@' so it is not
      scanned as an operator (NOTE(review): '@' itself is assigned
      ExponentPrecedence below -- confirm intended interplay).
    */
    if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@'))
      {
        expression++;
        continue;
      }
    /*
      First switch: jump over tokens that contain characters which would
      otherwise be misread as operators or scientific notation (function
      names such as "atan2", and '#'-prefixed hex literals).
    */
    switch (*expression)
    {
      case 'A':
      case 'a':
      {
#if defined(MAGICKCORE_HAVE_ACOSH)
        if (IsFxFunction(expression,"acosh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
        if (IsFxFunction(expression,"asinh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
        if (IsFxFunction(expression,"atanh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
        if (IsFxFunction(expression,"atan2",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
        break;
      }
      case 'E':
      case 'e':
      {
        /*
          "3.0E+2" / "3.0E-2": the sign is part of the literal, not an
          operator, so consume the "E+" / "E-" pair when the previous
          character was a digit.  When it is not scientific notation,
          control deliberately falls through to the 'J' case below.
        */
        if ((isdigit((int) ((unsigned char) c)) != 0) &&
            ((LocaleNCompare(expression,"E+",2) == 0) ||
             (LocaleNCompare(expression,"E-",2) == 0)))
          {
            expression+=2;  /* scientific notation */
            break;
          }
      }
      case 'J':
      case 'j':
      {
        /* Bessel function names: skip so the digit is not seen alone. */
        if ((IsFxFunction(expression,"j0",2) != MagickFalse) ||
            (IsFxFunction(expression,"j1",2) != MagickFalse))
          {
            expression+=2;
            break;
          }
        break;
      }
      case '#':
      {
        /* '#'-prefixed hexadecimal literal: consume its digits. */
        while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
          expression++;
        break;
      }
      default:
        break;
    }
    /*
      Track brace/bracket nesting of the *previous* character; operators
      inside '{...}' or '[...]' never become the split point.
      (Parentheses are handled separately via FxSubexpression() below.)
    */
    if ((c == (int) '{') || (c == (int) '['))
      level++;
    else
      if ((c == (int) '}') || (c == (int) ']'))
        level--;
    if (level == 0)
      /*
        Second switch: classify the current character (or multi-character
        operator token, encoded as single enum constants such as
        LeftShiftOperator) into a precedence class.
      */
      switch ((unsigned char) *expression)
      {
        case '~':
        case '!':
        {
          precedence=BitwiseComplementPrecedence;
          break;
        }
        case '^':
        case '@':
        {
          precedence=ExponentPrecedence;
          break;
        }
        default:
        {
          /*
            Implied multiplication, e.g. "2(x+1)" or ")(...)": previous
            char is a digit or ')', current char starts an operand.
            NOTE(review): "xy" is explicitly excluded -- presumably so
            pixel coordinate symbols like "p[1,1].x" parse; confirm.
          */
          if (((c != 0) && ((isdigit((int) ((unsigned char) c)) != 0) ||
               (strchr(")",c) != (char *) NULL))) &&
              (((islower((int) ((unsigned char) *expression)) != 0) ||
               (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) ||
               ((isdigit((int) ((unsigned char) c)) == 0) &&
                (isdigit((int) ((unsigned char) *expression)) != 0))) &&
              (strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL))
            precedence=MultiplyPrecedence;
          break;
        }
        case '*':
        case '/':
        case '%':
        {
          precedence=MultiplyPrecedence;
          break;
        }
        case '+':
        case '-':
        {
          /*
            '+'/'-' is binary addition only when the previous character
            ends an operand; after another operator (or at the start) it
            is a unary sign and is not a split point.
          */
          if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
              (isalpha((int) ((unsigned char) c)) != 0))
            precedence=AdditionPrecedence;
          break;
        }
        case BitwiseAndAssignmentOperator:
        case BitwiseOrAssignmentOperator:
        case LeftShiftAssignmentOperator:
        case RightShiftAssignmentOperator:
        case PowerAssignmentOperator:
        case ModuloAssignmentOperator:
        case PlusAssignmentOperator:
        case SubtractAssignmentOperator:
        case MultiplyAssignmentOperator:
        case DivideAssignmentOperator:
        case IncrementAssignmentOperator:
        case DecrementAssignmentOperator:
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case LeftShiftOperator:
        case RightShiftOperator:
        {
          precedence=ShiftPrecedence;
          break;
        }
        case '<':
        case LessThanEqualOperator:
        case GreaterThanEqualOperator:
        case '>':
        {
          precedence=RelationalPrecedence;
          break;
        }
        case EqualOperator:
        case NotEqualOperator:
        {
          precedence=EquivalencyPrecedence;
          break;
        }
        case '&':
        {
          precedence=BitwiseAndPrecedence;
          break;
        }
        case '|':
        {
          precedence=BitwiseOrPrecedence;
          break;
        }
        case LogicalAndOperator:
        {
          precedence=LogicalAndPrecedence;
          break;
        }
        case LogicalOrOperator:
        {
          precedence=LogicalOrPrecedence;
          break;
        }
        case ExponentialNotation:
        {
          precedence=ExponentialNotationPrecedence;
          break;
        }
        case ':':
        case '?':
        {
          precedence=TernaryPrecedence;
          break;
        }
        case '=':
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case ',':
        {
          precedence=CommaPrecedence;
          break;
        }
        case ';':
        {
          precedence=SeparatorPrecedence;
          break;
        }
      }
    /*
      Record the split point.  Right-associative operator classes (unary
      complement, ternary, assignment) keep the FIRST occurrence (strict
      '>'); all others keep the LAST occurrence ('>='), giving
      left-to-right associativity.
    */
    if ((precedence == BitwiseComplementPrecedence) ||
        (precedence == TernaryPrecedence) ||
        (precedence == AssignmentPrecedence))
      {
        if (precedence > target)
          {
            /*
              Right-to-left associativity.
            */
            target=precedence;
            subexpression=expression;
          }
      }
    else
      if (precedence >= target)
        {
          /*
            Left-to-right associativity.
          */
          target=precedence;
          subexpression=expression;
        }
    /* Skip a whole parenthesized group so its interior is never split. */
    if (strchr("(",(int) *expression) != (char *) NULL)
      expression=FxSubexpression(expression,exception);
    c=(int) (*expression++);  /* remember previous character for next pass */
  }
  return(subexpression);
}
static double FxEvaluateSubexpression(FxInfo *fx_info,
const PixelChannel channel,const ssize_t x,const ssize_t y,
const char *expression,const size_t depth,double *beta,
ExceptionInfo *exception)
{
#define FxMaxParenthesisDepth 58
#define FxMaxSubexpressionDepth 200
#define FxReturn(value) \
{ \
subexpression=DestroyString(subexpression); \
return(value); \
}
#define FxParseConditional(subexpression,sentinal,p,q) \
{ \
p=subexpression; \
for (q=(char *) p; (*q != (sentinal)) && (*q != '\0'); q++) \
if (*q == '(') \
{ \
for (q++; (*q != ')') && (*q != '\0'); q++); \
if (*q == '\0') \
break; \
} \
if (*q == '\0') \
{ \
(void) ThrowMagickException(exception,GetMagickModule(), \
OptionError,"UnableToParseExpression","`%s'",subexpression); \
FxReturn(0.0); \
} \
if (strlen(q) == 1) \
*(q+1)='\0'; \
*q='\0'; \
}
char
*q,
*subexpression;
double
alpha,
gamma,
sans,
value;
const char
*p;
*beta=0.0;
sans=0.0;
subexpression=AcquireString(expression);
*subexpression='\0';
if (depth > FxMaxSubexpressionDepth)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnableToParseExpression","`%s'",expression);
FxReturn(0.0);
}
if (exception->severity >= ErrorException)
FxReturn(0.0);
while (isspace((int) ((unsigned char) *expression)) != 0)
expression++;
if (*expression == '\0')
FxReturn(0.0);
p=FxOperatorPrecedence(expression,exception);
if (p != (const char *) NULL)
{
(void) CopyMagickString(subexpression,expression,(size_t)
(p-expression+1));
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
beta,exception);
switch ((unsigned char) *p)
{
case '~':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) (~(size_t) *beta);
FxReturn(*beta);
}
case '!':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(*beta == 0.0 ? 1.0 : 0.0);
}
case '^':
{
*beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p,
depth+1,beta,exception));
FxReturn(*beta);
}
case '*':
case ExponentialNotation:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha*(*beta));
}
case '/':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(PerceptibleReciprocal(*beta)*alpha);
}
case '%':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fmod(alpha,*beta));
}
case '+':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha+(*beta));
}
case '-':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha-(*beta));
}
case BitwiseAndAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(double) ((size_t) (alpha+0.5) & (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case BitwiseOrAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(double) ((size_t) (alpha+0.5) | (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case LeftShiftAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (*beta+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
value=(double) ((size_t) (alpha+0.5) << (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case RightShiftAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (*beta+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
value=(double) ((size_t) (alpha+0.5) >> (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case PowerAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=pow(alpha,*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case ModuloAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=fmod(alpha,*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case PlusAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha+(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case SubtractAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha-(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case MultiplyAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha*(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case DivideAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha*PerceptibleReciprocal(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case IncrementAssignmentOperator:
{
if (*subexpression == '\0')
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha+1.0;
if (*subexpression == '\0')
{
if (SetFxSymbolValue(fx_info,p,value) == MagickFalse)
return(0.0);
}
else
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case DecrementAssignmentOperator:
{
if (*subexpression == '\0')
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha-1.0;
if (*subexpression == '\0')
{
if (SetFxSymbolValue(fx_info,p,value) == MagickFalse)
return(0.0);
}
else
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case LeftShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (gamma+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
*beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5));
FxReturn(*beta);
}
case RightShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (gamma+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
*beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5));
FxReturn(*beta);
}
case '<':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha < *beta ? 1.0 : 0.0);
}
case LessThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha <= *beta ? 1.0 : 0.0);
}
case '>':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha > *beta ? 1.0 : 0.0);
}
case GreaterThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha >= *beta ? 1.0 : 0.0);
}
case EqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0);
}
case NotEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0);
}
case '&':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5));
FxReturn(*beta);
}
case '|':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5));
FxReturn(*beta);
}
case LogicalAndOperator:
{
p++;
if (alpha <= 0.0)
{
*beta=0.0;
FxReturn(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
FxReturn(*beta);
}
case LogicalOrOperator:
{
p++;
if (alpha > 0.0)
{
*beta=1.0;
FxReturn(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
FxReturn(*beta);
}
case '?':
{
(void) CopyMagickString(subexpression,++p,MagickPathExtent-1);
FxParseConditional(subexpression,':',p,q);
if (fabs(alpha) >= MagickEpsilon)
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
else
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
FxReturn(gamma);
}
case '=':
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case ',':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha);
}
case ';':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(*beta);
}
default:
{
gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,
beta,exception);
FxReturn(gamma);
}
}
}
if (strchr("(",(int) *expression) != (char *) NULL)
{
size_t
length;
if (depth >= FxMaxParenthesisDepth)
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"ParenthesisNestedTooDeeply","`%s'",expression);
length=CopyMagickString(subexpression,expression+1,MagickPathExtent);
if (length != 0)
subexpression[length-1]='\0';
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
beta,exception);
FxReturn(gamma);
}
switch (*expression)
{
case '+':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn(1.0*gamma);
}
case '-':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn(-1.0*gamma);
}
case '~':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn((double) (~(size_t) (gamma+0.5)));
}
case 'A':
case 'a':
{
if (IsFxFunction(expression,"abs",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(fabs(alpha));
}
#if defined(MAGICKCORE_HAVE_ACOSH)
if (IsFxFunction(expression,"acosh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(acosh(alpha));
}
#endif
if (IsFxFunction(expression,"acos",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(acos(alpha));
}
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"airy",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha);
FxReturn(gamma*gamma);
}
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
if (IsFxFunction(expression,"asinh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(asinh(alpha));
}
#endif
if (IsFxFunction(expression,"asin",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(asin(alpha));
}
if (IsFxFunction(expression,"alt",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0);
}
if (IsFxFunction(expression,"atan2",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(atan2(alpha,*beta));
}
#if defined(MAGICKCORE_HAVE_ATANH)
if (IsFxFunction(expression,"atanh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(atanh(alpha));
}
#endif
if (IsFxFunction(expression,"atan",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(atan(alpha));
}
if (LocaleCompare(expression,"a") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'B':
case 'b':
{
if (LocaleCompare(expression,"b") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'C':
case 'c':
{
if (IsFxFunction(expression,"ceil",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(ceil(alpha));
}
if (IsFxFunction(expression,"clamp",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (alpha < 0.0)
FxReturn(0.0);
if (alpha > 1.0)
FxReturn(1.0);
FxReturn(alpha);
}
if (IsFxFunction(expression,"cosh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(cosh(alpha));
}
if (IsFxFunction(expression,"cos",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(cos(alpha));
}
if (LocaleCompare(expression,"c") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'D':
case 'd':
{
if (IsFxFunction(expression,"debug",5) != MagickFalse)
{
const char
*type;
size_t
length;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
switch (fx_info->images->colorspace)
{
case CMYKColorspace:
{
switch (channel)
{
case CyanPixelChannel: type="cyan"; break;
case MagentaPixelChannel: type="magenta"; break;
case YellowPixelChannel: type="yellow"; break;
case AlphaPixelChannel: type="alpha"; break;
case BlackPixelChannel: type="black"; break;
default: type="unknown"; break;
}
break;
}
case GRAYColorspace:
{
switch (channel)
{
case RedPixelChannel: type="gray"; break;
case AlphaPixelChannel: type="alpha"; break;
default: type="unknown"; break;
}
break;
}
default:
{
switch (channel)
{
case RedPixelChannel: type="red"; break;
case GreenPixelChannel: type="green"; break;
case BluePixelChannel: type="blue"; break;
case AlphaPixelChannel: type="alpha"; break;
default: type="unknown"; break;
}
break;
}
}
*subexpression='\0';
length=1;
if (strlen(expression) > 6)
length=CopyMagickString(subexpression,expression+6,
MagickPathExtent);
if (length != 0)
subexpression[length-1]='\0';
if (fx_info->file != (FILE *) NULL)
(void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: "
"%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type,
subexpression,GetMagickPrecision(),alpha);
FxReturn(alpha);
}
if (IsFxFunction(expression,"do",2) != MagickFalse)
{
size_t
length;
/*
Parse do(expression,condition test).
*/
length=CopyMagickString(subexpression,expression+3,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
for (alpha=0.0; ; )
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
}
FxReturn(alpha);
}
if (IsFxFunction(expression,"drc",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn((alpha/(*beta*(alpha-1.0)+1.0)));
}
break;
}
case 'E':
case 'e':
{
if (LocaleCompare(expression,"epsilon") == 0)
FxReturn(MagickEpsilon);
#if defined(MAGICKCORE_HAVE_ERF)
if (IsFxFunction(expression,"erf",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(erf(alpha));
}
#endif
if (IsFxFunction(expression,"exp",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(exp(alpha));
}
if (LocaleCompare(expression,"e") == 0)
FxReturn(2.7182818284590452354);
break;
}
case 'F':
case 'f':
{
if (IsFxFunction(expression,"floor",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(floor(alpha));
}
if (IsFxFunction(expression,"for",3) != MagickFalse)
{
double
sans = 0.0;
size_t
length;
/*
Parse for(initialization, condition test, expression).
*/
length=CopyMagickString(subexpression,expression+4,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
(void) CopyMagickString(subexpression,q+1,MagickPathExtent-1);
FxParseConditional(subexpression,',',p,q);
for (alpha=0.0; ; )
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
}
FxReturn(alpha);
}
break;
}
case 'G':
case 'g':
{
if (IsFxFunction(expression,"gauss",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI));
}
if (IsFxFunction(expression,"gcd",3) != MagickFalse)
{
double
gcd;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
if (IsNaN(alpha))
FxReturn(alpha);
gcd=FxGCD(alpha,*beta);
FxReturn(gcd);
}
if (LocaleCompare(expression,"g") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'H':
case 'h':
{
if (LocaleCompare(expression,"h") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (LocaleCompare(expression,"hue") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"hypot",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(hypot(alpha,*beta));
}
break;
}
case 'K':
case 'k':
{
if (LocaleCompare(expression,"k") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'I':
case 'i':
{
if (IsFxFunction(expression,"if",2) != MagickFalse)
{
double
sans = 0.0;
size_t
length;
/*
Parse if(condition test, true-expression, false-expression).
*/
length=CopyMagickString(subexpression,expression+3,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
(void) CopyMagickString(subexpression,q+1,MagickPathExtent-1);
FxParseConditional(subexpression,',',p,q);
if (fabs(alpha) >= MagickEpsilon)
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
else
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
FxReturn(alpha);
}
if (LocaleCompare(expression,"intensity") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"int",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(floor(alpha));
}
if (IsFxFunction(expression,"isnan",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn((double) !!IsNaN(alpha));
}
if (LocaleCompare(expression,"i") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'J':
case 'j':
{
if (LocaleCompare(expression,"j") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
#if defined(MAGICKCORE_HAVE_J0)
if (IsFxFunction(expression,"j0",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(j0(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"j1",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(j1(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"jinc",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
FxReturn((2.0*j1((MagickPI*alpha))/(MagickPI*alpha)));
}
#endif
break;
}
case 'L':
case 'l':
{
if (IsFxFunction(expression,"ln",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(log(alpha));
}
if (IsFxFunction(expression,"logtwo",6) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
depth+1,beta,exception);
FxReturn(log10(alpha)/log10(2.0));
}
if (IsFxFunction(expression,"log",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(log10(alpha));
}
if (LocaleCompare(expression,"lightness") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'M':
case 'm':
{
if (LocaleCompare(expression,"MaxRGB") == 0)
FxReturn(QuantumRange);
if (LocaleNCompare(expression,"maxima",6) == 0)
break;
if (IsFxFunction(expression,"max",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha > *beta ? alpha : *beta);
}
if (LocaleNCompare(expression,"minima",6) == 0)
break;
if (IsFxFunction(expression,"min",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha < *beta ? alpha : *beta);
}
if (IsFxFunction(expression,"mod",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha-floor((alpha*PerceptibleReciprocal(*beta)))*(*beta));
}
if (LocaleCompare(expression,"m") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'N':
case 'n':
{
if (IsFxFunction(expression,"not",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn((double) (alpha < MagickEpsilon));
}
if (LocaleCompare(expression,"n") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'O':
case 'o':
{
if (LocaleCompare(expression,"Opaque") == 0)
FxReturn(1.0);
if (LocaleCompare(expression,"o") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(expression,"phi") == 0)
FxReturn(MagickPHI);
if (LocaleCompare(expression,"pi") == 0)
FxReturn(MagickPI);
if (IsFxFunction(expression,"pow",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(pow(alpha,*beta));
}
if (LocaleCompare(expression,"p") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Q':
case 'q':
{
if (LocaleCompare(expression,"QuantumRange") == 0)
FxReturn(QuantumRange);
if (LocaleCompare(expression,"QuantumScale") == 0)
FxReturn(QuantumScale);
break;
}
case 'R':
case 'r':
{
if (IsFxFunction(expression,"rand",4) != MagickFalse)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FxEvaluateSubexpression)
#endif
alpha=GetPseudoRandomValue(fx_info->random_info);
FxReturn(alpha);
}
if (IsFxFunction(expression,"round",5) != MagickFalse)
{
/*
Round the fraction to nearest integer.
*/
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if ((alpha-floor(alpha)) < (ceil(alpha)-alpha))
FxReturn(floor(alpha));
FxReturn(ceil(alpha));
}
if (LocaleCompare(expression,"r") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'S':
case 's':
{
if (LocaleCompare(expression,"saturation") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"sign",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(alpha < 0.0 ? -1.0 : 1.0);
}
if (IsFxFunction(expression,"sinc",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0)
FxReturn(1.0);
FxReturn(sin((MagickPI*alpha))/(MagickPI*alpha));
}
if (IsFxFunction(expression,"sinh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(sinh(alpha));
}
if (IsFxFunction(expression,"sin",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(sin(alpha));
}
if (IsFxFunction(expression,"sqrt",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(sqrt(alpha));
}
if (IsFxFunction(expression,"squish",6) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
depth+1,beta,exception);
FxReturn((1.0/(1.0+exp(-alpha))));
}
if (LocaleCompare(expression,"s") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'T':
case 't':
{
if (IsFxFunction(expression,"tanh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(tanh(alpha));
}
if (IsFxFunction(expression,"tan",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(tan(alpha));
}
if (LocaleCompare(expression,"Transparent") == 0)
FxReturn(0.0);
if (IsFxFunction(expression,"trunc",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (alpha >= 0.0)
FxReturn(floor(alpha));
FxReturn(ceil(alpha));
}
if (LocaleCompare(expression,"t") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'U':
case 'u':
{
if (LocaleCompare(expression,"u") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'V':
case 'v':
{
if (LocaleCompare(expression,"v") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'W':
case 'w':
{
if (IsFxFunction(expression,"while",5) != MagickFalse)
{
size_t
length;
/*
Parse while(condition test, expression).
*/
length=CopyMagickString(subexpression,expression+6,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
for (alpha=0.0; ; )
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,
beta,exception);
}
FxReturn(alpha);
}
if (LocaleCompare(expression,"w") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Y':
case 'y':
{
if (LocaleCompare(expression,"y") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Z':
case 'z':
{
if (LocaleCompare(expression,"z") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
default:
break;
}
subexpression=DestroyString(subexpression);
q=(char *) expression;
alpha=InterpretSiPrefixValue(expression,&q);
if (q == expression)
alpha=FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception);
FxReturn(alpha);
}
MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: evaluate the expression for the gray channel at
    pixel (0,0), storing the result in *alpha.
  */
  return(FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception));
}
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  FILE
    *saved_file;

  MagickBooleanType
    status;

  /*
    Evaluate the expression once with the debug stream detached, so any
    debug() terms stay silent during preprocessing; the stream is restored
    before returning.
  */
  saved_file=fx_info->file;
  fx_info->file=(FILE *) NULL;
  status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception);
  fx_info->file=saved_file;
  return(status);
}
MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const PixelChannel channel,const ssize_t x,const ssize_t y,
  double *alpha,ExceptionInfo *exception)
{
  double
    beta = 0.0;

  /*
    Evaluate the full expression tree for one channel of one pixel; the
    result lands in *alpha.  Failure is signaled through the exception
    severity rather than a sentinel value.
  */
  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0,
    &beta,exception);
  if (exception->severity == OptionError)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxImage() applies a mathematical expression to the specified image.
%
% The format of the FxImage method is:
%
% Image *FxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: A mathematical expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
  ssize_t
    i,
    number_threads;

  /*
    Release the per-thread FxInfo array allocated by AcquireFxThreadSet(),
    then the array itself.  Returns NULL so callers can reset their pointer
    in a single statement.
  */
  assert(fx_info != (FxInfo **) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);  /* hoisted: loop invariant */
  for (i=0; i < number_threads; i++)
    if (fx_info[i] != (FxInfo *) NULL)
      fx_info[i]=DestroyFxInfo(fx_info[i]);
  fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
  return(fx_info);
}
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  char
    *fx_expression;

  double
    alpha;

  FxInfo
    **fx_info;

  ssize_t
    i;

  size_t
    number_threads;

  /*
    Allocate and preprocess one FxInfo per worker thread.  A leading '@'
    means the expression is read from a file.  On any failure the partially
    built set is torn down and NULL is returned.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
  if (fx_info == (FxInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return((FxInfo **) NULL);
    }
  (void) memset(fx_info,0,number_threads*sizeof(*fx_info));
  if (*expression != '@')
    fx_expression=ConstantString(expression);
  else
    fx_expression=FileToString(expression+1,~0UL,exception);
  if (fx_expression == (char *) NULL)
    {
      /*
        FileToString() fails for a missing or unreadable file; without this
        check a NULL expression would be handed to AcquireFxInfo() and
        DestroyString() below.
      */
      fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
      return((FxInfo **) NULL);
    }
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    MagickBooleanType
      status;

    fx_info[i]=AcquireFxInfo(image,fx_expression,exception);
    if (fx_info[i] == (FxInfo *) NULL)
      break;
    /* Preprocess once per thread; also validates the expression. */
    status=FxPreprocessExpression(fx_info[i],&alpha,exception);
    if (status == MagickFalse)
      break;
  }
  fx_expression=DestroyString(fx_expression);
  if (i < (ssize_t) number_threads)
    fx_info=DestroyFxThreadSet(fx_info);
  return(fx_info);
}
MagickExport Image *FxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
#define FxImageTag  "Fx/Image"

  CacheView
    *fx_view,
    *image_view;

  FxInfo
    **magick_restrict fx_info;

  Image
    *fx_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* A NULL expression degenerates to a plain clone. */
  if (expression == (const char *) NULL)
    return(CloneImage(image,0,0,MagickTrue,exception));
  /* One FxInfo per OpenMP thread; each row evaluates with its own copy. */
  fx_info=AcquireFxThreadSet(image,expression,exception);
  if (fx_info == (FxInfo **) NULL)
    return((Image *) NULL);
  fx_image=CloneImage(image,0,0,MagickTrue,exception);
  if (fx_image == (Image *) NULL)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      fx_image=DestroyImage(fx_image);
      return((Image *) NULL);
    }
  /*
    Fx image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* Force single-threaded execution when the expression uses debug(), so
     diagnostic output is not interleaved. */
  #pragma omp parallel for schedule(dynamic) shared(progress,status) \
    magick_number_threads(image,fx_image,fx_image->rows, \
      GlobExpression(fx_info[0]->expression,"debug(",MagickTrue) == 0 ? 1 : 0)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) fx_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (fx_traits == UndefinedPixelTrait))
          continue;
        /* Copy-trait channels pass through unchanged. */
        if ((fx_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(fx_image,channel,p[i],q);
            continue;
          }
        alpha=0.0;
        (void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha,
          exception);
        /* Expression results are normalized [0,1]; scale to quantum range. */
        q[i]=ClampToQuantum(QuantumRange*alpha);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(fx_image);
    }
    if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FxImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  fx_view=DestroyCacheView(fx_view);
  image_view=DestroyCacheView(image_view);
  fx_info=DestroyFxThreadSet(fx_info);
  if (status == MagickFalse)
    fx_image=DestroyImage(fx_image);
  return(fx_image);
}
|
DRB048-firstprivate-orig-no.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISO/IEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Example use of firstprivate()
*/
/* Add the constant g to every element of a[0..n-1], in parallel. */
void foo(int * a, int n, int g)
{
  int idx;

  #pragma cetus private(idx)
  #pragma loop name foo#0
  #pragma cetus parallel
  #pragma omp parallel for private(idx)
  for (idx = 0; idx < n; idx++)
  {
    a[idx] += g;
  }
  return ;
}
/* Shared global array operated on by the parallel loops below. */
int a[100];

/*
  Driver for the firstprivate example: fill a[] with its indices in
  parallel, add the constant 7 to each element through foo(), then print
  the result sequentially.
  NOTE(review): printf is called but no #include <stdio.h> appears in this
  file -- it relies on an implicit declaration; confirm the build
  preincludes the header or add it.
*/
int main()
{
  int i;
  int _ret_val_0;

  #pragma cetus private(i)
  #pragma loop name main#0
  #pragma cetus parallel
  #pragma omp parallel for private(i)
  for (i=0; i<100; i ++ )
  {
    a[i]=i;
  }
  /* After this call a[i] == i + 7 for every i. */
  foo(a, 100, 7);
  #pragma cetus private(i)
  #pragma loop name main#1
  for (i=0; i<100; i ++ )
  {
    printf("%d\n", a[i]);
  }
  _ret_val_0=0;
  return _ret_val_0;
}
|
TSDFVoxelGridImpl.h | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include <atomic>
#include <cmath>
#include "open3d/core/Dispatch.h"
#include "open3d/core/Dtype.h"
#include "open3d/core/MemoryManager.h"
#include "open3d/core/SizeVector.h"
#include "open3d/core/Tensor.h"
#include "open3d/t/geometry/Utility.h"
#include "open3d/t/geometry/kernel/GeometryIndexer.h"
#include "open3d/t/geometry/kernel/GeometryMacros.h"
#include "open3d/t/geometry/kernel/TSDFVoxel.h"
#include "open3d/t/geometry/kernel/TSDFVoxelGrid.h"
#include "open3d/utility/Logging.h"
#include "open3d/utility/Timer.h"
namespace open3d {
namespace t {
namespace geometry {
namespace kernel {
namespace tsdf {
// Fuse one depth (and optionally color) frame into the active TSDF voxel
// blocks.  One parallel work item per voxel: project the voxel into the
// image, compute the truncated signed distance, and fold it into the
// voxel's running TSDF/weight (and color) state via voxel_t::Integrate.
#if defined(__CUDACC__)
void IntegrateCUDA
#else
void IntegrateCPU
#endif
        (const core::Tensor& depth,
         const core::Tensor& color,
         const core::Tensor& indices,
         const core::Tensor& block_keys,
         core::Tensor& block_values,
         // Transforms
         const core::Tensor& intrinsics,
         const core::Tensor& extrinsics,
         // Parameters
         int64_t resolution,
         float voxel_size,
         float sdf_trunc,
         float depth_scale,
         float depth_max) {
    // Parameters
    int64_t resolution3 = resolution * resolution * resolution;

    // Shape / transform indexers, no data involved
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});
    TransformIndexer transform_indexer(intrinsics, extrinsics, voxel_size);

    // Real data indexer
    NDArrayIndexer depth_indexer(depth, 2);
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);

    // Optional color integration
    NDArrayIndexer color_indexer;
    bool integrate_color = false;
    if (color.NumElements() != 0) {
        color_indexer = NDArrayIndexer(color, 2);
        integrate_color = true;
    }

    // Plain arrays that does not require indexers
    const int* indices_ptr = indices.GetDataPtr<int>();

    // One work item per voxel of every active block.
    int64_t n = indices.GetLength() * resolution3;
    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                core::ParallelFor(
                        depth.GetDevice(), n,
                        [=] OPEN3D_DEVICE(int64_t workload_idx) {
                            // Natural index (0, N) -> (block_idx, voxel_idx)
                            int block_idx =
                                    indices_ptr[workload_idx / resolution3];
                            int voxel_idx = workload_idx % resolution3;

                            /// Coordinate transform
                            // block_idx -> (x_block, y_block, z_block)
                            int* block_key_ptr =
                                    block_keys_indexer.GetDataPtr<int>(
                                            block_idx);
                            int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
                            int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
                            int64_t zb = static_cast<int64_t>(block_key_ptr[2]);

                            // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                            int64_t xv, yv, zv;
                            voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv,
                                                          &zv);

                            // coordinate in world (in voxel)
                            int64_t x = (xb * resolution + xv);
                            int64_t y = (yb * resolution + yv);
                            int64_t z = (zb * resolution + zv);

                            // coordinate in camera (in voxel -> in meter)
                            float xc, yc, zc, u, v;
                            transform_indexer.RigidTransform(
                                    static_cast<float>(x),
                                    static_cast<float>(y),
                                    static_cast<float>(z), &xc, &yc, &zc);

                            // coordinate in image (in pixel)
                            transform_indexer.Project(xc, yc, zc, &u, &v);
                            if (!depth_indexer.InBoundary(u, v)) {
                                return;
                            }

                            // Associate image workload and compute SDF and
                            // TSDF.
                            float depth = *depth_indexer.GetDataPtr<float>(
                                                  static_cast<int64_t>(u),
                                                  static_cast<int64_t>(v)) /
                                          depth_scale;

                            float sdf = (depth - zc);
                            // Skip invalid depth, out-of-range depth, voxels
                            // behind the camera, and voxels beyond the far
                            // truncation band.
                            if (depth <= 0 || depth > depth_max || zc <= 0 ||
                                sdf < -sdf_trunc) {
                                return;
                            }
                            // Truncate and normalize sdf to [-1, 1].
                            sdf = sdf < sdf_trunc ? sdf : sdf_trunc;
                            sdf /= sdf_trunc;

                            // Associate voxel workload and update TSDF/Weights
                            voxel_t* voxel_ptr =
                                    voxel_block_buffer_indexer
                                            .GetDataPtr<voxel_t>(xv, yv, zv,
                                                                 block_idx);

                            if (integrate_color) {
                                float* color_ptr =
                                        color_indexer.GetDataPtr<float>(
                                                static_cast<int64_t>(u),
                                                static_cast<int64_t>(v));

                                voxel_ptr->Integrate(sdf, color_ptr[0],
                                                     color_ptr[1],
                                                     color_ptr[2]);
                            } else {
                                voxel_ptr->Integrate(sdf);
                            }
                        });
            });
#if defined(__CUDACC__)
    core::cuda::Synchronize();
#endif
}
// Extract the zero-crossing surface of the TSDF as a point cloud (with
// optional per-point normals and colors).  If valid_size < 0 an extra
// counting pass first estimates the output size; otherwise the provided
// estimate bounds the atomic-counter-indexed writes in the second pass.
#if defined(__CUDACC__)
void ExtractSurfacePointsCUDA
#else
void ExtractSurfacePointsCPU
#endif
        (const core::Tensor& indices,
         const core::Tensor& nb_indices,
         const core::Tensor& nb_masks,
         const core::Tensor& block_keys,
         const core::Tensor& block_values,
         core::Tensor& points,
         utility::optional<std::reference_wrapper<core::Tensor>> normals,
         utility::optional<std::reference_wrapper<core::Tensor>> colors,
         int64_t resolution,
         float voxel_size,
         float weight_threshold,
         int& valid_size) {
    // Parameters
    int64_t resolution3 = resolution * resolution * resolution;

    // Shape / transform indexers, no data involved
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});

    // Real data indexer
    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    NDArrayIndexer nb_block_masks_indexer(nb_masks, 2);
    NDArrayIndexer nb_block_indices_indexer(nb_indices, 2);

    // Plain arrays that does not require indexers
    const int64_t* indices_ptr = indices.GetDataPtr<int64_t>();

    int64_t n_blocks = indices.GetLength();
    int64_t n = n_blocks * resolution3;

    // Output: a single shared counter, device tensor on CUDA, std::atomic
    // on the host.
#if defined(__CUDACC__)
    core::Tensor count(std::vector<int>{0}, {1}, core::Int32,
                       block_values.GetDevice());
    int* count_ptr = count.GetDataPtr<int>();
#else
    std::atomic<int> count_atomic(0);
    std::atomic<int>* count_ptr = &count_atomic;
#endif

    if (valid_size < 0) {
        utility::LogWarning(
                "No estimated max point cloud size provided, using a 2-pass "
                "estimation. Surface extraction could be slow.");
        // This pass determines valid number of points.
        DISPATCH_BYTESIZE_TO_VOXEL(
                voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                    core::ParallelFor(
                            indices.GetDevice(), n,
                            [=] OPEN3D_DEVICE(int64_t workload_idx) {
                                // Fetch a voxel by local offset, crossing into
                                // neighbor blocks when the offset leaves this
                                // block; nullptr when the neighbor is absent.
                                auto GetVoxelAt = [&] OPEN3D_DEVICE(
                                                          int xo, int yo,
                                                          int zo,
                                                          int curr_block_idx)
                                        -> voxel_t* {
                                    return DeviceGetVoxelAt<voxel_t>(
                                            xo, yo, zo, curr_block_idx,
                                            static_cast<int>(resolution),
                                            nb_block_masks_indexer,
                                            nb_block_indices_indexer,
                                            voxel_block_buffer_indexer);
                                };

                                // Natural index (0, N) -> (block_idx,
                                // voxel_idx)
                                int64_t workload_block_idx =
                                        workload_idx / resolution3;
                                int64_t block_idx =
                                        indices_ptr[workload_block_idx];
                                int64_t voxel_idx = workload_idx % resolution3;

                                // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                                int64_t xv, yv, zv;
                                voxel_indexer.WorkloadToCoord(voxel_idx, &xv,
                                                              &yv, &zv);

                                voxel_t* voxel_ptr =
                                        voxel_block_buffer_indexer
                                                .GetDataPtr<voxel_t>(xv, yv, zv,
                                                                     block_idx);
                                float tsdf_o = voxel_ptr->GetTSDF();
                                float weight_o = voxel_ptr->GetWeight();
                                if (weight_o <= weight_threshold) return;

                                // Enumerate x-y-z directions
                                // Count one surface point per sign change
                                // along each positive axis direction.
                                for (int i = 0; i < 3; ++i) {
                                    voxel_t* ptr = GetVoxelAt(
                                            static_cast<int>(xv) + (i == 0),
                                            static_cast<int>(yv) + (i == 1),
                                            static_cast<int>(zv) + (i == 2),
                                            static_cast<int>(
                                                    workload_block_idx));
                                    if (ptr == nullptr) continue;
                                    float tsdf_i = ptr->GetTSDF();
                                    float weight_i = ptr->GetWeight();
                                    if (weight_i > weight_threshold &&
                                        tsdf_i * tsdf_o < 0) {
                                        OPEN3D_ATOMIC_ADD(count_ptr, 1);
                                    }
                                }
                            });
                });
#if defined(__CUDACC__)
        valid_size = count[0].Item<int>();
        count[0] = 0;
#else
        valid_size = (*count_ptr).load();
        (*count_ptr) = 0;
#endif
    }

    int max_count = valid_size;
    if (points.GetLength() == 0) {
        points = core::Tensor({max_count, 3}, core::Float32,
                              block_values.GetDevice());
    }
    NDArrayIndexer point_indexer(points, 1);

    // Normals
    bool extract_normal = false;
    NDArrayIndexer normal_indexer;
    if (normals.has_value()) {
        extract_normal = true;
        if (normals.value().get().GetLength() == 0) {
            normals.value().get() = core::Tensor({max_count, 3}, core::Float32,
                                                 block_values.GetDevice());
        }
        normal_indexer = NDArrayIndexer(normals.value().get(), 1);
    }

    // This pass extracts exact surface points.
    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                // Colors
                bool extract_color = false;
                NDArrayIndexer color_indexer;
                if (voxel_t::HasColor() && colors.has_value()) {
                    extract_color = true;
                    if (colors.value().get().GetLength() == 0) {
                        colors.value().get() =
                                core::Tensor({max_count, 3}, core::Float32,
                                             block_values.GetDevice());
                    }
                    color_indexer = NDArrayIndexer(colors.value().get(), 1);
                }
                core::ParallelFor(
                        indices.GetDevice(), n,
                        [=] OPEN3D_DEVICE(int64_t workload_idx) {
                            auto GetVoxelAt =
                                    [&] OPEN3D_DEVICE(
                                            int xo, int yo, int zo,
                                            int curr_block_idx) -> voxel_t* {
                                return DeviceGetVoxelAt<voxel_t>(
                                        xo, yo, zo, curr_block_idx,
                                        static_cast<int>(resolution),
                                        nb_block_masks_indexer,
                                        nb_block_indices_indexer,
                                        voxel_block_buffer_indexer);
                            };
                            auto GetNormalAt = [&] OPEN3D_DEVICE(
                                                       int xo, int yo, int zo,
                                                       int curr_block_idx,
                                                       float* n) {
                                return DeviceGetNormalAt<voxel_t>(
                                        xo, yo, zo, curr_block_idx, n,
                                        static_cast<int>(resolution),
                                        voxel_size, nb_block_masks_indexer,
                                        nb_block_indices_indexer,
                                        voxel_block_buffer_indexer);
                            };

                            // Natural index (0, N) -> (block_idx, voxel_idx)
                            int64_t workload_block_idx =
                                    workload_idx / resolution3;
                            int64_t block_idx = indices_ptr[workload_block_idx];
                            int64_t voxel_idx = workload_idx % resolution3;

                            /// Coordinate transform
                            // block_idx -> (x_block, y_block, z_block)
                            int* block_key_ptr =
                                    block_keys_indexer.GetDataPtr<int>(
                                            block_idx);
                            int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
                            int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
                            int64_t zb = static_cast<int64_t>(block_key_ptr[2]);

                            // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                            int64_t xv, yv, zv;
                            voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv,
                                                          &zv);

                            voxel_t* voxel_ptr =
                                    voxel_block_buffer_indexer
                                            .GetDataPtr<voxel_t>(xv, yv, zv,
                                                                 block_idx);
                            float tsdf_o = voxel_ptr->GetTSDF();
                            float weight_o = voxel_ptr->GetWeight();

                            if (weight_o <= weight_threshold) return;

                            // Global voxel coordinate of this voxel.
                            int64_t x = xb * resolution + xv;
                            int64_t y = yb * resolution + yv;
                            int64_t z = zb * resolution + zv;

                            float no[3] = {0}, ni[3] = {0};
                            if (extract_normal) {
                                GetNormalAt(
                                        static_cast<int>(xv),
                                        static_cast<int>(yv),
                                        static_cast<int>(zv),
                                        static_cast<int>(workload_block_idx),
                                        no);
                            }

                            // Enumerate x-y-z axis
                            for (int i = 0; i < 3; ++i) {
                                voxel_t* ptr = GetVoxelAt(
                                        static_cast<int>(xv) + (i == 0),
                                        static_cast<int>(yv) + (i == 1),
                                        static_cast<int>(zv) + (i == 2),
                                        static_cast<int>(workload_block_idx));
                                if (ptr == nullptr) continue;

                                float tsdf_i = ptr->GetTSDF();
                                float weight_i = ptr->GetWeight();

                                if (weight_i > weight_threshold &&
                                    tsdf_i * tsdf_o < 0) {
                                    // Linear interpolation to the zero
                                    // crossing between the two voxels.
                                    float ratio =
                                            (0 - tsdf_o) / (tsdf_i - tsdf_o);

                                    int idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
                                    if (idx >= valid_size) {
                                        printf("Point cloud size larger than "
                                               "estimated, please increase the "
                                               "estimation!\n");
                                        return;
                                    }

                                    float* point_ptr =
                                            point_indexer.GetDataPtr<float>(
                                                    idx);
                                    point_ptr[0] = voxel_size *
                                                   (x + ratio * int(i == 0));
                                    point_ptr[1] = voxel_size *
                                                   (y + ratio * int(i == 1));
                                    point_ptr[2] = voxel_size *
                                                   (z + ratio * int(i == 2));

                                    if (extract_color) {
                                        float* color_ptr =
                                                color_indexer.GetDataPtr<float>(
                                                        idx);

                                        // Interpolate color between the two
                                        // voxels; stored colors appear to be
                                        // 0-255, output normalized to [0,1].
                                        float r_o = voxel_ptr->GetR();
                                        float g_o = voxel_ptr->GetG();
                                        float b_o = voxel_ptr->GetB();

                                        float r_i = ptr->GetR();
                                        float g_i = ptr->GetG();
                                        float b_i = ptr->GetB();

                                        color_ptr[0] = ((1 - ratio) * r_o +
                                                        ratio * r_i) /
                                                       255.0f;
                                        color_ptr[1] = ((1 - ratio) * g_o +
                                                        ratio * g_i) /
                                                       255.0f;
                                        color_ptr[2] = ((1 - ratio) * b_o +
                                                        ratio * b_i) /
                                                       255.0f;
                                    }

                                    if (extract_normal) {
                                        GetNormalAt(
                                                static_cast<int>(xv) + (i == 0),
                                                static_cast<int>(yv) + (i == 1),
                                                static_cast<int>(zv) + (i == 2),
                                                static_cast<int>(
                                                        workload_block_idx),
                                                ni);

                                        // Interpolate and renormalize the
                                        // gradient normals (epsilon avoids
                                        // division by zero).
                                        float* normal_ptr =
                                                normal_indexer
                                                        .GetDataPtr<float>(idx);
                                        float nx = (1 - ratio) * no[0] +
                                                   ratio * ni[0];
                                        float ny = (1 - ratio) * no[1] +
                                                   ratio * ni[1];
                                        float nz = (1 - ratio) * no[2] +
                                                   ratio * ni[2];
                                        float norm = static_cast<float>(
                                                sqrt(nx * nx + ny * ny +
                                                     nz * nz) +
                                                1e-5);
                                        normal_ptr[0] = nx / norm;
                                        normal_ptr[1] = ny / norm;
                                        normal_ptr[2] = nz / norm;
                                    }
                                }
                            }
                        });
            });
#if defined(__CUDACC__)
    int total_count = count.Item<int>();
#else
    int total_count = (*count_ptr).load();
#endif

    utility::LogDebug("{} vertices extracted", total_count);
    valid_size = total_count;

#if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__)
    core::cuda::Synchronize();
#endif
}
#if defined(__CUDACC__)
void ExtractSurfaceMeshCUDA
#else
void ExtractSurfaceMeshCPU
#endif
(const core::Tensor& indices,
const core::Tensor& inv_indices,
const core::Tensor& nb_indices,
const core::Tensor& nb_masks,
const core::Tensor& block_keys,
const core::Tensor& block_values,
core::Tensor& vertices,
core::Tensor& triangles,
utility::optional<std::reference_wrapper<core::Tensor>> normals,
utility::optional<std::reference_wrapper<core::Tensor>> colors,
int64_t resolution,
float voxel_size,
float weight_threshold,
int& vertex_count) {
int64_t resolution3 = resolution * resolution * resolution;
// Shape / transform indexers, no data involved
NDArrayIndexer voxel_indexer({resolution, resolution, resolution});
int n_blocks = static_cast<int>(indices.GetLength());
// TODO(wei): profile performance by replacing the table to a hashmap.
// Voxel-wise mesh info. 4 channels correspond to:
// 3 edges' corresponding vertex index + 1 table index.
core::Tensor mesh_structure;
try {
mesh_structure = core::Tensor::Zeros(
{n_blocks, resolution, resolution, resolution, 4}, core::Int32,
block_keys.GetDevice());
} catch (const std::runtime_error&) {
utility::LogError(
"[MeshExtractionKernel] Unable to allocate assistance mesh "
"structure for Marching "
"Cubes with {} active voxel blocks. Please consider using a "
"larger voxel size (currently {}) for TSDF "
"integration, or using tsdf_volume.cpu() to perform mesh "
"extraction on CPU.",
n_blocks, voxel_size);
}
// Real data indexer
NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
NDArrayIndexer mesh_structure_indexer(mesh_structure, 4);
NDArrayIndexer nb_block_masks_indexer(nb_masks, 2);
NDArrayIndexer nb_block_indices_indexer(nb_indices, 2);
// Plain arrays that does not require indexers
const int64_t* indices_ptr = indices.GetDataPtr<int64_t>();
const int64_t* inv_indices_ptr = inv_indices.GetDataPtr<int64_t>();
int64_t n = n_blocks * resolution3;
int64_t voxel_bytesize = voxel_block_buffer_indexer.ElementByteSize();
// Pass 0: analyze mesh structure, set up one-on-one correspondences
// from edges to vertices.
DISPATCH_BYTESIZE_TO_VOXEL(voxel_bytesize, [&]() {
core::ParallelFor(
indices.GetDevice(), n, [=] OPEN3D_DEVICE(int64_t widx) {
auto GetVoxelAt = [&] OPEN3D_DEVICE(
int xo, int yo, int zo,
int curr_block_idx) -> voxel_t* {
return DeviceGetVoxelAt<voxel_t>(
xo, yo, zo, curr_block_idx,
static_cast<int>(resolution),
nb_block_masks_indexer,
nb_block_indices_indexer,
voxel_block_buffer_indexer);
};
// Natural index (0, N) -> (block_idx, voxel_idx)
int64_t workload_block_idx = widx / resolution3;
int64_t voxel_idx = widx % resolution3;
// voxel_idx -> (x_voxel, y_voxel, z_voxel)
int64_t xv, yv, zv;
voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
// Check per-vertex sign in the cube to determine cube
// type
int table_idx = 0;
for (int i = 0; i < 8; ++i) {
voxel_t* voxel_ptr_i = GetVoxelAt(
static_cast<int>(xv) + vtx_shifts[i][0],
static_cast<int>(yv) + vtx_shifts[i][1],
static_cast<int>(zv) + vtx_shifts[i][2],
static_cast<int>(workload_block_idx));
if (voxel_ptr_i == nullptr) return;
float tsdf_i = voxel_ptr_i->GetTSDF();
float weight_i = voxel_ptr_i->GetWeight();
if (weight_i <= weight_threshold) return;
table_idx |= ((tsdf_i < 0) ? (1 << i) : 0);
}
int* mesh_struct_ptr =
mesh_structure_indexer.GetDataPtr<int>(
xv, yv, zv, workload_block_idx);
mesh_struct_ptr[3] = table_idx;
if (table_idx == 0 || table_idx == 255) return;
// Check per-edge sign determine the cube type
int edges_with_vertices = edge_table[table_idx];
for (int i = 0; i < 12; ++i) {
if (edges_with_vertices & (1 << i)) {
int64_t xv_i = xv + edge_shifts[i][0];
int64_t yv_i = yv + edge_shifts[i][1];
int64_t zv_i = zv + edge_shifts[i][2];
int edge_i = edge_shifts[i][3];
int dxb = static_cast<int>(xv_i / resolution);
int dyb = static_cast<int>(yv_i / resolution);
int dzb = static_cast<int>(zv_i / resolution);
int nb_idx =
(dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9;
int64_t block_idx_i =
*nb_block_indices_indexer
.GetDataPtr<int64_t>(
workload_block_idx,
nb_idx);
int* mesh_ptr_i =
mesh_structure_indexer.GetDataPtr<int>(
xv_i - dxb * resolution,
yv_i - dyb * resolution,
zv_i - dzb * resolution,
inv_indices_ptr[block_idx_i]);
// Non-atomic write, but we are safe
mesh_ptr_i[edge_i] = -1;
}
}
});
});
// Pass 1: determine valid number of vertices (if not preset)
#if defined(__CUDACC__)
core::Tensor count(std::vector<int>{0}, {}, core::Int32,
block_values.GetDevice());
int* count_ptr = count.GetDataPtr<int>();
#else
std::atomic<int> count_atomic(0);
std::atomic<int>* count_ptr = &count_atomic;
#endif
if (vertex_count < 0) {
core::ParallelFor(
indices.GetDevice(), n, [=] OPEN3D_DEVICE(int64_t widx) {
// Natural index (0, N) -> (block_idx, voxel_idx)
int64_t workload_block_idx = widx / resolution3;
int64_t voxel_idx = widx % resolution3;
// voxel_idx -> (x_voxel, y_voxel, z_voxel)
int64_t xv, yv, zv;
voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
// Obtain voxel's mesh struct ptr
int* mesh_struct_ptr =
mesh_structure_indexer.GetDataPtr<int>(
xv, yv, zv, workload_block_idx);
// Early quit -- no allocated vertex to compute
if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 &&
mesh_struct_ptr[2] != -1) {
return;
}
// Enumerate 3 edges in the voxel
for (int e = 0; e < 3; ++e) {
int vertex_idx = mesh_struct_ptr[e];
if (vertex_idx != -1) continue;
OPEN3D_ATOMIC_ADD(count_ptr, 1);
}
});
#if defined(__CUDACC__)
vertex_count = count.Item<int>();
#else
vertex_count = (*count_ptr).load();
#endif
}
utility::LogDebug("Total vertex count = {}", vertex_count);
vertices = core::Tensor({vertex_count, 3}, core::Float32,
block_values.GetDevice());
bool extract_normal = false;
NDArrayIndexer normal_indexer;
if (normals.has_value()) {
extract_normal = true;
normals.value().get() = core::Tensor({vertex_count, 3}, core::Float32,
block_values.GetDevice());
normal_indexer = NDArrayIndexer(normals.value().get(), 1);
}
NDArrayIndexer block_keys_indexer(block_keys, 1);
NDArrayIndexer vertex_indexer(vertices, 1);
#if defined(__CUDACC__)
count = core::Tensor(std::vector<int>{0}, {}, core::Int32,
block_values.GetDevice());
count_ptr = count.GetDataPtr<int>();
#else
(*count_ptr) = 0;
#endif
// Pass 2: extract vertices.
DISPATCH_BYTESIZE_TO_VOXEL(voxel_bytesize, [&]() {
bool extract_color = false;
NDArrayIndexer color_indexer;
if (voxel_t::HasColor() && colors.has_value()) {
extract_color = true;
colors.value().get() = core::Tensor(
{vertex_count, 3}, core::Float32, block_values.GetDevice());
color_indexer = NDArrayIndexer(colors.value().get(), 1);
}
core::ParallelFor(
indices.GetDevice(), n, [=] OPEN3D_DEVICE(int64_t widx) {
auto GetVoxelAt = [&] OPEN3D_DEVICE(
int xo, int yo, int zo,
int curr_block_idx) -> voxel_t* {
return DeviceGetVoxelAt<voxel_t>(
xo, yo, zo, curr_block_idx,
static_cast<int>(resolution),
nb_block_masks_indexer,
nb_block_indices_indexer,
voxel_block_buffer_indexer);
};
auto GetNormalAt = [&] OPEN3D_DEVICE(int xo, int yo, int zo,
int curr_block_idx,
float* n) {
return DeviceGetNormalAt<voxel_t>(
xo, yo, zo, curr_block_idx, n,
static_cast<int>(resolution), voxel_size,
nb_block_masks_indexer,
nb_block_indices_indexer,
voxel_block_buffer_indexer);
};
// Natural index (0, N) -> (block_idx, voxel_idx)
int64_t workload_block_idx = widx / resolution3;
int64_t block_idx = indices_ptr[workload_block_idx];
int64_t voxel_idx = widx % resolution3;
// block_idx -> (x_block, y_block, z_block)
int* block_key_ptr =
block_keys_indexer.GetDataPtr<int>(block_idx);
int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
int64_t zb = static_cast<int64_t>(block_key_ptr[2]);
// voxel_idx -> (x_voxel, y_voxel, z_voxel)
int64_t xv, yv, zv;
voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
// global coordinate (in voxels)
int64_t x = xb * resolution + xv;
int64_t y = yb * resolution + yv;
int64_t z = zb * resolution + zv;
// Obtain voxel's mesh struct ptr
int* mesh_struct_ptr =
mesh_structure_indexer.GetDataPtr<int>(
xv, yv, zv, workload_block_idx);
// Early quit -- no allocated vertex to compute
if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 &&
mesh_struct_ptr[2] != -1) {
return;
}
// Obtain voxel ptr
voxel_t* voxel_ptr =
voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
xv, yv, zv, block_idx);
float tsdf_o = voxel_ptr->GetTSDF();
float no[3] = {0}, ne[3] = {0};
if (extract_normal) {
GetNormalAt(static_cast<int>(xv), static_cast<int>(yv),
static_cast<int>(zv),
static_cast<int>(workload_block_idx), no);
}
// Enumerate 3 edges in the voxel
for (int e = 0; e < 3; ++e) {
int vertex_idx = mesh_struct_ptr[e];
if (vertex_idx != -1) continue;
voxel_t* voxel_ptr_e = GetVoxelAt(
static_cast<int>(xv) + (e == 0),
static_cast<int>(yv) + (e == 1),
static_cast<int>(zv) + (e == 2),
static_cast<int>(workload_block_idx));
OPEN3D_ASSERT(
voxel_ptr_e != nullptr &&
"Internal error: GetVoxelAt returns nullptr.");
float tsdf_e = voxel_ptr_e->GetTSDF();
float ratio = (0 - tsdf_o) / (tsdf_e - tsdf_o);
int idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
mesh_struct_ptr[e] = idx;
float ratio_x = ratio * int(e == 0);
float ratio_y = ratio * int(e == 1);
float ratio_z = ratio * int(e == 2);
float* vertex_ptr =
vertex_indexer.GetDataPtr<float>(idx);
vertex_ptr[0] = voxel_size * (x + ratio_x);
vertex_ptr[1] = voxel_size * (y + ratio_y);
vertex_ptr[2] = voxel_size * (z + ratio_z);
if (extract_normal) {
float* normal_ptr =
normal_indexer.GetDataPtr<float>(idx);
GetNormalAt(static_cast<int>(xv) + (e == 0),
static_cast<int>(yv) + (e == 1),
static_cast<int>(zv) + (e == 2),
static_cast<int>(workload_block_idx),
ne);
float nx = (1 - ratio) * no[0] + ratio * ne[0];
float ny = (1 - ratio) * no[1] + ratio * ne[1];
float nz = (1 - ratio) * no[2] + ratio * ne[2];
float norm = static_cast<float>(
sqrt(nx * nx + ny * ny + nz * nz) + 1e-5);
normal_ptr[0] = nx / norm;
normal_ptr[1] = ny / norm;
normal_ptr[2] = nz / norm;
}
if (extract_color) {
float* color_ptr =
color_indexer.GetDataPtr<float>(idx);
float r_o = voxel_ptr->GetR();
float g_o = voxel_ptr->GetG();
float b_o = voxel_ptr->GetB();
float r_e = voxel_ptr_e->GetR();
float g_e = voxel_ptr_e->GetG();
float b_e = voxel_ptr_e->GetB();
color_ptr[0] =
((1 - ratio) * r_o + ratio * r_e) / 255.0f;
color_ptr[1] =
((1 - ratio) * g_o + ratio * g_e) / 255.0f;
color_ptr[2] =
((1 - ratio) * b_o + ratio * b_e) / 255.0f;
}
}
});
});
// Pass 3: connect vertices and form triangles.
int triangle_count = vertex_count * 3;
triangles = core::Tensor({triangle_count, 3}, core::Int64,
block_values.GetDevice());
NDArrayIndexer triangle_indexer(triangles, 1);
#if defined(__CUDACC__)
count = core::Tensor(std::vector<int>{0}, {}, core::Int32,
block_values.GetDevice());
count_ptr = count.GetDataPtr<int>();
#else
(*count_ptr) = 0;
#endif
core::ParallelFor(indices.GetDevice(), n, [=] OPEN3D_DEVICE(int64_t widx) {
// Natural index (0, N) -> (block_idx, voxel_idx)
int64_t workload_block_idx = widx / resolution3;
int64_t voxel_idx = widx % resolution3;
// voxel_idx -> (x_voxel, y_voxel, z_voxel)
int64_t xv, yv, zv;
voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
// Obtain voxel's mesh struct ptr
int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<int>(
xv, yv, zv, workload_block_idx);
int table_idx = mesh_struct_ptr[3];
if (tri_count[table_idx] == 0) return;
for (size_t tri = 0; tri < 16; tri += 3) {
if (tri_table[table_idx][tri] == -1) return;
int tri_idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
for (size_t vertex = 0; vertex < 3; ++vertex) {
int edge = tri_table[table_idx][tri + vertex];
int64_t xv_i = xv + edge_shifts[edge][0];
int64_t yv_i = yv + edge_shifts[edge][1];
int64_t zv_i = zv + edge_shifts[edge][2];
int64_t edge_i = edge_shifts[edge][3];
int dxb = static_cast<int>(xv_i / resolution);
int dyb = static_cast<int>(yv_i / resolution);
int dzb = static_cast<int>(zv_i / resolution);
int nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9;
int64_t block_idx_i =
*nb_block_indices_indexer.GetDataPtr<int64_t>(
workload_block_idx, nb_idx);
int* mesh_struct_ptr_i = mesh_structure_indexer.GetDataPtr<int>(
xv_i - dxb * resolution, yv_i - dyb * resolution,
zv_i - dzb * resolution, inv_indices_ptr[block_idx_i]);
int64_t* triangle_ptr =
triangle_indexer.GetDataPtr<int64_t>(tri_idx);
triangle_ptr[2 - vertex] = mesh_struct_ptr_i[edge_i];
}
}
});
#if defined(__CUDACC__)
triangle_count = count.Item<int>();
#else
triangle_count = (*count_ptr).load();
#endif
utility::LogInfo("Total triangle count = {}", triangle_count);
triangles = triangles.Slice(0, 0, triangle_count);
}
#if defined(__CUDACC__)
void EstimateRangeCUDA
#else
void EstimateRangeCPU
#endif
        (const core::Tensor& block_keys,
         core::Tensor& range_minmax_map,
         const core::Tensor& intrinsics,
         const core::Tensor& extrinsics,
         int h,
         int w,
         int down_factor,
         int64_t block_resolution,
         float voxel_size,
         float depth_min,
         float depth_max) {
    // Estimate, at a down-sampled resolution, the per-pixel [z_min, z_max]
    // depth interval covered by the active voxel blocks.  The resulting
    // range_minmax_map bounds the marching interval of the ray-cast pass.
    //
    // block_keys:       (N, 3) Int32 coordinates of active blocks.
    // range_minmax_map: output (h/down_factor, w/down_factor, 2) Float32 map,
    //                   channel 0 = min depth, channel 1 = max depth.
    // intrinsics/extrinsics: pinhole camera model (world -> camera).
    //
    // TODO(wei): reserve it in a reusable buffer

    // Every 2 channels: (min, max)
    int h_down = h / down_factor;
    int w_down = w / down_factor;
    range_minmax_map = core::Tensor({h_down, w_down, 2}, core::Float32,
                                    block_keys.GetDevice());
    NDArrayIndexer range_map_indexer(range_minmax_map, 2);

    // Every 6 channels: (z_min, z_max, v_min, u_min, v_max, u_max)
    // (order fixed to match the actual frag_ptr[0..5] layout below).
    const int fragment_size = 16;
    const int frag_buffer_size = 65535;

    // TODO(wei): explicit buffer
    core::Tensor fragment_buffer = core::Tensor(
            {frag_buffer_size, 6}, core::Float32, block_keys.GetDevice());

    NDArrayIndexer frag_buffer_indexer(fragment_buffer, 1);
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    TransformIndexer w2c_transform_indexer(intrinsics, extrinsics);

#if defined(__CUDACC__)
    core::Tensor count(std::vector<int>{0}, {1}, core::Int32,
                       block_keys.GetDevice());
    int* count_ptr = count.GetDataPtr<int>();
#else
    std::atomic<int> count_atomic(0);
    std::atomic<int>* count_ptr = &count_atomic;
#endif

#ifndef __CUDACC__
    using std::max;
    using std::min;
#endif

    // Pass 0: iterate over blocks, fill-in a rendering fragment array.
    core::ParallelFor(
            block_keys.GetDevice(), block_keys.GetLength(),
            [=] OPEN3D_DEVICE(int64_t workload_idx) {
                int* key = block_keys_indexer.GetDataPtr<int>(workload_idx);

                int u_min = w_down - 1, v_min = h_down - 1, u_max = 0,
                    v_max = 0;
                float z_min = depth_max, z_max = depth_min;

                float xc, yc, zc, u, v;

                // Project the 8 block corners to the low-res image and take
                // their bounding rectangle and depth interval.
                for (int i = 0; i < 8; ++i) {
                    float xw = (key[0] + ((i & 1) > 0)) * block_resolution *
                               voxel_size;
                    float yw = (key[1] + ((i & 2) > 0)) * block_resolution *
                               voxel_size;
                    float zw = (key[2] + ((i & 4) > 0)) * block_resolution *
                               voxel_size;

                    w2c_transform_indexer.RigidTransform(xw, yw, zw, &xc, &yc,
                                                         &zc);
                    // Corners behind the camera do not contribute.
                    if (zc <= 0) continue;

                    // Project to the down sampled image buffer
                    w2c_transform_indexer.Project(xc, yc, zc, &u, &v);
                    u /= down_factor;
                    v /= down_factor;

                    v_min = min(static_cast<int>(floorf(v)), v_min);
                    v_max = max(static_cast<int>(ceilf(v)), v_max);

                    u_min = min(static_cast<int>(floorf(u)), u_min);
                    u_max = max(static_cast<int>(ceilf(u)), u_max);

                    z_min = min(z_min, zc);
                    z_max = max(z_max, zc);
                }

                // Clamp the rectangle to the image; degenerate or off-screen
                // blocks produce an empty rectangle and are dropped.
                v_min = max(0, v_min);
                v_max = min(h_down - 1, v_max);

                u_min = max(0, u_min);
                u_max = min(w_down - 1, u_max);

                if (v_min >= v_max || u_min >= u_max || z_min >= z_max) return;

                // Divide the rectangle into small 16x16 fragments.
                int frag_v_count =
                        ceilf(float(v_max - v_min + 1) / float(fragment_size));
                int frag_u_count =
                        ceilf(float(u_max - u_min + 1) / float(fragment_size));
                int frag_count = frag_v_count * frag_u_count;

                // Reserve a contiguous range of slots for this block's
                // fragments.  BUGFIX: the counter must advance by frag_count;
                // the previous code added only 1, so concurrent blocks
                // overwrote each other's fragment slots.
                int frag_count_start =
                        OPEN3D_ATOMIC_ADD(count_ptr, frag_count);
                int frag_count_end = frag_count_start + frag_count;
                if (frag_count_end >= frag_buffer_size) {
                    printf("Fragment count exceeding buffer size, abort!\n");
                    // BUGFIX: skip writing instead of running past the end of
                    // the fragment buffer; the consumer clamps to capacity.
                    return;
                }

                int offset = 0;
                for (int frag_v = 0; frag_v < frag_v_count; ++frag_v) {
                    for (int frag_u = 0; frag_u < frag_u_count;
                         ++frag_u, ++offset) {
                        float* frag_ptr = frag_buffer_indexer.GetDataPtr<float>(
                                frag_count_start + offset);
                        // zmin, zmax
                        frag_ptr[0] = z_min;
                        frag_ptr[1] = z_max;
                        // vmin, umin
                        frag_ptr[2] = v_min + frag_v * fragment_size;
                        frag_ptr[3] = u_min + frag_u * fragment_size;
                        // vmax, umax (clipped to the block's rectangle)
                        frag_ptr[4] = min(frag_ptr[2] + fragment_size - 1,
                                          static_cast<float>(v_max));
                        frag_ptr[5] = min(frag_ptr[3] + fragment_size - 1,
                                          static_cast<float>(u_max));
                    }
                }
            });

#if defined(__CUDACC__)
    int frag_count = count[0].Item<int>();
#else
    int frag_count = (*count_ptr).load();
#endif
    // Defensive clamp: if the atomic counter overshot the capacity, consume
    // only the fragments that were actually written.
    if (frag_count > frag_buffer_size) {
        frag_count = frag_buffer_size;
    }

    // Pass 0.5: fill in the range map to prepare for the min/max reduction.
    core::ParallelFor(block_keys.GetDevice(), h_down * w_down,
                      [=] OPEN3D_DEVICE(int64_t workload_idx) {
                          int v = workload_idx / w_down;
                          int u = workload_idx % w_down;
                          float* range_ptr =
                                  range_map_indexer.GetDataPtr<float>(u, v);
                          range_ptr[0] = depth_max;
                          range_ptr[1] = depth_min;
                      });

    // Pass 1: iterate over the fragment array and reduce per-pixel ranges.
    core::ParallelFor(
            block_keys.GetDevice(), frag_count * fragment_size * fragment_size,
            [=] OPEN3D_DEVICE(int64_t workload_idx) {
                int frag_idx = workload_idx / (fragment_size * fragment_size);
                int local_idx = workload_idx % (fragment_size * fragment_size);
                int dv = local_idx / fragment_size;
                int du = local_idx % fragment_size;

                float* frag_ptr =
                        frag_buffer_indexer.GetDataPtr<float>(frag_idx);
                int v_min = static_cast<int>(frag_ptr[2]);
                int u_min = static_cast<int>(frag_ptr[3]);
                int v_max = static_cast<int>(frag_ptr[4]);
                int u_max = static_cast<int>(frag_ptr[5]);

                int v = v_min + dv;
                int u = u_min + du;
                if (v > v_max || u > u_max) return;

                float z_min = frag_ptr[0];
                float z_max = frag_ptr[1];
                float* range_ptr = range_map_indexer.GetDataPtr<float>(u, v);
#ifdef __CUDACC__
                atomicMinf(&(range_ptr[0]), z_min);
                atomicMaxf(&(range_ptr[1]), z_max);
#else
#pragma omp critical(EstimateRangeCPU)
                {
                    range_ptr[0] = min(z_min, range_ptr[0]);
                    range_ptr[1] = max(z_max, range_ptr[1]);
                }
#endif
            });
#if defined(__CUDACC__)
    core::cuda::Synchronize();
#endif
}
// One-entry cache of the most recently resolved block-coordinate -> buffer
// index mapping, used to avoid repeated hashmap lookups while marching a ray
// (consecutive samples usually fall in the same block).
struct BlockCache {
    int x;
    int y;
    int z;
    int block_idx;

    /// Return the cached block index when the query coordinate equals the
    /// cached one; return -1 on a cache miss.
    inline int OPEN3D_DEVICE Check(int xin, int yin, int zin) {
        bool hit = (xin == x) && (yin == y) && (zin == z);
        return hit ? block_idx : -1;
    }

    /// Replace the cached entry with a new coordinate / block-index pair.
    inline void OPEN3D_DEVICE Update(int xin,
                                     int yin,
                                     int zin,
                                     int block_idx_in) {
        x = xin;
        y = yin;
        z = zin;
        block_idx = block_idx_in;
    }
};
// Ray cast the TSDF voxel-block grid into per-pixel maps (depth / vertex /
// color / normal, each optional).  For every pixel a ray is marched through
// the sparse block structure; the zero crossing of the TSDF is located and
// refined by linear interpolation, then attributes are trilinearly
// interpolated around the intersection.  Compiled twice: once for CUDA
// (__CUDACC__) and once for CPU/OpenMP.
#if defined(__CUDACC__)
void RayCastCUDA
#else
void RayCastCPU
#endif
(std::shared_ptr<core::DeviceHashBackend>& hashmap,
const core::Tensor& block_values,
const core::Tensor& range_map,
core::Tensor& vertex_map,
core::Tensor& depth_map,
core::Tensor& color_map,
core::Tensor& normal_map,
const core::Tensor& intrinsics,
const core::Tensor& extrinsics,
int h,
int w,
int64_t block_resolution,
float voxel_size,
float sdf_trunc,
float depth_scale,
float depth_min,
float depth_max,
float weight_threshold) {
// Hashmap key = 3D integer block coordinate.
using Key = core::Block<int, 3>;
using Hash = core::BlockHash<int, 3>;
// Obtain a device-side view of the hashmap: STDGPU on CUDA, TBB on CPU.
#if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__)
auto cuda_hashmap =
std::dynamic_pointer_cast<core::StdGPUHashBackend<Key, Hash>>(
hashmap);
if (cuda_hashmap == nullptr) {
utility::LogError(
"Unsupported backend: CUDA raycasting only supports STDGPU.");
}
auto hashmap_impl = cuda_hashmap->GetImpl();
#else
auto cpu_hashmap =
std::dynamic_pointer_cast<core::TBBHashBackend<Key, Hash>>(hashmap);
auto hashmap_impl = *cpu_hashmap->GetImpl();
#endif
NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
NDArrayIndexer range_map_indexer(range_map, 2);
NDArrayIndexer vertex_map_indexer;
NDArrayIndexer depth_map_indexer;
NDArrayIndexer color_map_indexer;
NDArrayIndexer normal_map_indexer;
// An output is enabled iff the caller passed a non-empty tensor for it.
bool enable_vertex = (vertex_map.GetLength() != 0);
bool enable_depth = (depth_map.GetLength() != 0);
bool enable_color = (color_map.GetLength() != 0);
bool enable_normal = (normal_map.GetLength() != 0);
if (!enable_vertex && !enable_depth && !enable_color && !enable_normal) {
utility::LogWarning("No output specified for ray casting, exit.");
return;
}
if (enable_vertex) {
vertex_map_indexer = NDArrayIndexer(vertex_map, 2);
}
if (enable_depth) {
depth_map_indexer = NDArrayIndexer(depth_map, 2);
}
if (enable_color) {
color_map_indexer = NDArrayIndexer(color_map, 2);
}
if (enable_normal) {
normal_map_indexer = NDArrayIndexer(normal_map, 2);
}
// c2w: unproject pixels into world rays; w2c: bring results back to camera.
TransformIndexer c2w_transform_indexer(
intrinsics, t::geometry::InverseTransformation(extrinsics));
TransformIndexer w2c_transform_indexer(intrinsics, extrinsics);
int64_t rows = h;
int64_t cols = w;
// Side length of one voxel block in world units.
float block_size = voxel_size * block_resolution;
#ifndef __CUDACC__
using std::max;
#endif
// Dispatch on the stored voxel layout, then launch one thread per pixel.
DISPATCH_BYTESIZE_TO_VOXEL(voxel_block_buffer_indexer.ElementByteSize(), [&]() {
core::ParallelFor(
hashmap->GetDevice(), rows * cols,
[=] OPEN3D_DEVICE(int64_t workload_idx) {
// Fetch the voxel at (x_v, y_v, z_v) relative to block (x_b, y_b, z_b),
// where the voxel offset may step +/-1 outside the block; neighbor
// blocks are resolved through the cache, then the hashmap.  Returns
// nullptr if the required block is not allocated.
auto GetVoxelAtP = [&] OPEN3D_DEVICE(
int x_b, int y_b, int z_b,
int x_v, int y_v, int z_v,
core::buf_index_t block_addr,
BlockCache& cache) -> voxel_t* {
// Wrap the voxel offset into [0, block_resolution); Sign() of the
// difference tells which neighbor block (if any) it landed in.
int x_vn = (x_v + block_resolution) % block_resolution;
int y_vn = (y_v + block_resolution) % block_resolution;
int z_vn = (z_v + block_resolution) % block_resolution;
int dx_b = Sign(x_v - x_vn);
int dy_b = Sign(y_v - y_vn);
int dz_b = Sign(z_v - z_vn);
if (dx_b == 0 && dy_b == 0 && dz_b == 0) {
// Same block: the caller-supplied buffer address is valid.
return voxel_block_buffer_indexer
.GetDataPtr<voxel_t>(x_v, y_v, z_v,
block_addr);
} else {
Key key;
key.Set(0, x_b + dx_b);
key.Set(1, y_b + dy_b);
key.Set(2, z_b + dz_b);
// NOTE(review): this local deliberately shadows the block_addr
// parameter; from here on it refers to the neighbor block.
int block_addr = cache.Check(key.Get(0), key.Get(1),
key.Get(2));
if (block_addr < 0) {
auto iter = hashmap_impl.find(key);
if (iter == hashmap_impl.end()) return nullptr;
block_addr = iter->second;
cache.Update(key.Get(0), key.Get(1), key.Get(2),
block_addr);
}
return voxel_block_buffer_indexer
.GetDataPtr<voxel_t>(x_vn, y_vn, z_vn,
block_addr);
}
};
// Fetch the voxel at parameter t along the ray origin + t * direction,
// resolving the containing block through the cache/hashmap.  Returns
// nullptr if that block is not allocated.
auto GetVoxelAtT = [&] OPEN3D_DEVICE(
float x_o, float y_o, float z_o,
float x_d, float y_d, float z_d,
float t,
BlockCache& cache) -> voxel_t* {
float x_g = x_o + t * x_d;
float y_g = y_o + t * y_d;
float z_g = z_o + t * z_d;
// Block coordinate and look up
int x_b = static_cast<int>(floorf(x_g / block_size));
int y_b = static_cast<int>(floorf(y_g / block_size));
int z_b = static_cast<int>(floorf(z_g / block_size));
Key key;
key.Set(0, x_b);
key.Set(1, y_b);
key.Set(2, z_b);
int block_addr = cache.Check(x_b, y_b, z_b);
if (block_addr < 0) {
auto iter = hashmap_impl.find(key);
if (iter == hashmap_impl.end()) return nullptr;
block_addr = iter->second;
cache.Update(x_b, y_b, z_b, block_addr);
}
// Voxel coordinate and look up
int x_v = int((x_g - x_b * block_size) / voxel_size);
int y_v = int((y_g - y_b * block_size) / voxel_size);
int z_v = int((z_g - z_b * block_size) / voxel_size);
return voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
x_v, y_v, z_v, block_addr);
};
// Pixel coordinates of this thread.
int64_t y = workload_idx / cols;
int64_t x = workload_idx % cols;
// Zero-initialize all enabled outputs so misses yield empty pixels.
float *depth_ptr = nullptr, *vertex_ptr = nullptr,
*normal_ptr = nullptr, *color_ptr = nullptr;
if (enable_depth) {
depth_ptr = depth_map_indexer.GetDataPtr<float>(x, y);
*depth_ptr = 0;
}
if (enable_vertex) {
vertex_ptr = vertex_map_indexer.GetDataPtr<float>(x, y);
vertex_ptr[0] = 0;
vertex_ptr[1] = 0;
vertex_ptr[2] = 0;
}
if (enable_color) {
color_ptr = color_map_indexer.GetDataPtr<float>(x, y);
color_ptr[0] = 0;
color_ptr[1] = 0;
color_ptr[2] = 0;
}
if (enable_normal) {
normal_ptr = normal_map_indexer.GetDataPtr<float>(x, y);
normal_ptr[0] = 0;
normal_ptr[1] = 0;
normal_ptr[2] = 0;
}
// Per-pixel marching interval from the range estimation pass.
// NOTE(review): the /8 indexing hard-codes the range map's down
// factor -- keep in sync with the down_factor used in EstimateRange.
const float* range =
range_map_indexer.GetDataPtr<float>(x / 8, y / 8);
float t = range[0];
const float t_max = range[1];
if (t >= t_max) return;
// Coordinates in camera and global
float x_c = 0, y_c = 0, z_c = 0;
float x_g = 0, y_g = 0, z_g = 0;
float x_o = 0, y_o = 0, z_o = 0;
// Iterative ray intersection check
float t_prev = t;
float tsdf_prev = -1.0f;
float tsdf = 1.0;
float w = 0.0;
// Camera origin
c2w_transform_indexer.RigidTransform(0, 0, 0, &x_o, &y_o,
&z_o);
// Direction
c2w_transform_indexer.Unproject(static_cast<float>(x),
static_cast<float>(y), 1.0f,
&x_c, &y_c, &z_c);
c2w_transform_indexer.RigidTransform(x_c, y_c, z_c, &x_g,
&y_g, &z_g);
// Ray direction scaled so that t is measured in camera depth units.
float x_d = (x_g - x_o);
float y_d = (y_g - y_o);
float z_d = (z_g - z_o);
BlockCache cache{0, 0, 0, -1};
bool surface_found = false;
// March: jump a whole block through unallocated space, otherwise step
// proportionally to the TSDF (at least one voxel) until the sign of a
// sufficiently weighted sample flips from + to -.
while (t < t_max) {
voxel_t* voxel_ptr = GetVoxelAtT(x_o, y_o, z_o, x_d,
y_d, z_d, t, cache);
if (!voxel_ptr) {
t_prev = t;
t += block_size;
} else {
tsdf_prev = tsdf;
tsdf = voxel_ptr->GetTSDF();
w = voxel_ptr->GetWeight();
if (tsdf_prev > 0 && w >= weight_threshold &&
tsdf <= 0) {
surface_found = true;
break;
}
t_prev = t;
float delta = tsdf * sdf_trunc;
t += delta < voxel_size ? voxel_size : delta;
}
}
if (surface_found) {
// Linear interpolation of the zero crossing between t_prev and t.
float t_intersect = (t * tsdf_prev - t_prev * tsdf) /
(tsdf_prev - tsdf);
x_g = x_o + t_intersect * x_d;
y_g = y_o + t_intersect * y_d;
z_g = z_o + t_intersect * z_d;
// Trivial vertex assignment
if (enable_depth) {
*depth_ptr = t_intersect * depth_scale;
}
if (enable_vertex) {
// Vertex map is expressed in camera coordinates.
w2c_transform_indexer.RigidTransform(
x_g, y_g, z_g, vertex_ptr + 0,
vertex_ptr + 1, vertex_ptr + 2);
}
// Trilinear interpolation
// TODO(wei): simplify the flow by splitting the
// functions given what is enabled
if (enable_color || enable_normal) {
// Locate the intersection's block and fractional voxel coord.
int x_b =
static_cast<int>(floorf(x_g / block_size));
int y_b =
static_cast<int>(floorf(y_g / block_size));
int z_b =
static_cast<int>(floorf(z_g / block_size));
float x_v = (x_g - float(x_b) * block_size) /
voxel_size;
float y_v = (y_g - float(y_b) * block_size) /
voxel_size;
float z_v = (z_g - float(z_b) * block_size) /
voxel_size;
Key key;
key.Set(0, x_b);
key.Set(1, y_b);
key.Set(2, z_b);
int block_addr = cache.Check(x_b, y_b, z_b);
if (block_addr < 0) {
auto iter = hashmap_impl.find(key);
if (iter == hashmap_impl.end()) return;
block_addr = iter->second;
cache.Update(x_b, y_b, z_b, block_addr);
}
int x_v_floor = static_cast<int>(floorf(x_v));
int y_v_floor = static_cast<int>(floorf(y_v));
int z_v_floor = static_cast<int>(floorf(z_v));
float ratio_x = x_v - float(x_v_floor);
float ratio_y = y_v - float(y_v_floor);
float ratio_z = z_v - float(z_v_floor);
float sum_weight_color = 0.0;
float sum_weight_normal = 0.0;
// Visit the 8 corners of the surrounding voxel cell; k's bits
// select the +0/+1 offset per axis, ratio is the trilinear weight.
for (int k = 0; k < 8; ++k) {
int dx_v = (k & 1) > 0 ? 1 : 0;
int dy_v = (k & 2) > 0 ? 1 : 0;
int dz_v = (k & 4) > 0 ? 1 : 0;
float ratio = (dx_v * (ratio_x) +
(1 - dx_v) * (1 - ratio_x)) *
(dy_v * (ratio_y) +
(1 - dy_v) * (1 - ratio_y)) *
(dz_v * (ratio_z) +
(1 - dz_v) * (1 - ratio_z));
voxel_t* voxel_ptr_k = GetVoxelAtP(
x_b, y_b, z_b, x_v_floor + dx_v,
y_v_floor + dy_v, z_v_floor + dz_v,
block_addr, cache);
if (enable_color && voxel_ptr_k &&
voxel_ptr_k->GetWeight() > 0) {
sum_weight_color += ratio;
color_ptr[0] += ratio * voxel_ptr_k->GetR();
color_ptr[1] += ratio * voxel_ptr_k->GetG();
color_ptr[2] += ratio * voxel_ptr_k->GetB();
}
if (enable_normal) {
// Normal = TSDF gradient via central differences per axis,
// accumulated with the corner's trilinear weight.
for (int dim = 0; dim < 3; ++dim) {
voxel_t* voxel_ptr_k_plus = GetVoxelAtP(
x_b, y_b, z_b,
x_v_floor + dx_v + (dim == 0),
y_v_floor + dy_v + (dim == 1),
z_v_floor + dz_v + (dim == 2),
block_addr, cache);
voxel_t* voxel_ptr_k_minus =
GetVoxelAtP(x_b, y_b, z_b,
x_v_floor + dx_v -
(dim == 0),
y_v_floor + dy_v -
(dim == 1),
z_v_floor + dz_v -
(dim == 2),
block_addr, cache);
bool valid = false;
if (voxel_ptr_k_plus &&
voxel_ptr_k_plus->GetWeight() > 0) {
normal_ptr[dim] +=
ratio *
voxel_ptr_k_plus
->GetTSDF() /
(2 * voxel_size);
valid = true;
}
if (voxel_ptr_k_minus &&
voxel_ptr_k_minus->GetWeight() >
0) {
normal_ptr[dim] -=
ratio *
voxel_ptr_k_minus
->GetTSDF() /
(2 * voxel_size);
valid = true;
}
sum_weight_normal += valid ? ratio : 0;
}
} // if (enable_normal)
} // loop over 8 neighbors
// Normalize accumulated color by total weight (and 255 -> [0,1]).
if (enable_color && sum_weight_color > 0) {
sum_weight_color *= 255.0;
color_ptr[0] /= sum_weight_color;
color_ptr[1] /= sum_weight_color;
color_ptr[2] /= sum_weight_color;
}
if (enable_normal && sum_weight_normal > 0) {
normal_ptr[0] /= sum_weight_normal;
normal_ptr[1] /= sum_weight_normal;
normal_ptr[2] /= sum_weight_normal;
// NOTE(review): norm could be 0 if gradient components cancel;
// unguarded division -- confirm upstream data makes this safe.
float norm =
sqrt(normal_ptr[0] * normal_ptr[0] +
normal_ptr[1] * normal_ptr[1] +
normal_ptr[2] * normal_ptr[2]);
// Rotate the normal into camera coordinates.
w2c_transform_indexer.Rotate(
normal_ptr[0] / norm,
normal_ptr[1] / norm,
normal_ptr[2] / norm, normal_ptr + 0,
normal_ptr + 1, normal_ptr + 2);
}
} // if (color or normal)
} // if (tsdf < 0)
});
});
#if defined(__CUDACC__)
core::cuda::Synchronize();
#endif
}
} // namespace tsdf
} // namespace kernel
} // namespace geometry
} // namespace t
} // namespace open3d
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.